// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/kvm_types.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>

DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

static int has_guest_poll = 0;
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool dummy;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
}
async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, u32 token) { struct hlist_node *p; hlist_for_each(p, &b->list) { struct kvm_task_sleep_node *n = hlist_entry(p, typeof(*n), link); if (n->token == token) return n; } return NULL; } static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; struct kvm_task_sleep_node *e; raw_spin_lock(&b->lock); e = _find_apf_task(b, token); if (e) { struct kvm_task_sleep_node *dummy = NULL; /* * The entry can either be a 'dummy' entry (which is put on the * list when wake-up happens ahead of APF handling completion) * or a token from another task which should not be touched. */ if (e->dummy) { hlist_del(&e->link); dummy = e; } raw_spin_unlock(&b->lock); kfree(dummy); return false; } n->token = token; n->cpu = smp_processor_id(); n->dummy = false; init_swait_queue_head(&n->wq); hlist_add_head(&n->link, &b->list); raw_spin_unlock(&b->lock); return true; } /* * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled * @token: Token to identify the sleep node entry * * Invoked from the async pagefault handling code or from the VM exit page * fault handler. In both cases RCU is watching. */ void kvm_async_pf_task_wait_schedule(u32 token) { struct kvm_task_sleep_node n; DECLARE_SWAITQUEUE(wait); lockdep_assert_irqs_disabled(); if (!kvm_async_pf_queue_task(token, &n)) return; for (;;) { prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE); if (hlist_unhashed(&n.link)) break; local_irq_enable(); schedule(); local_irq_disable(); } finish_swait(&n.wq, &wait); } EXPORT_SYMBOL_FOR_KVM(kvm_async_pf_task_wait_schedule); static void apf_task_wake_one(struct kvm_task_sleep_node *n) { hlist_del_init(&n->link); if (swq_has_sleeper(&n->wq)) swake_up_one(&n->wq); } static void apf_task_wake_all(void) { int i; for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; struct kvm_task_sleep_node *n; struct hlist_node *p, *next; raw_spin_lock(&b->lock); hlist_for_each_safe(p, next, &b->list) { n = hlist_entry(p, typeof(*n), link); if (n->cpu == smp_processor_id()) apf_task_wake_one(n); } raw_spin_unlock(&b->lock); } } static void kvm_async_pf_task_wake(u32 token) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; struct kvm_task_sleep_node *n, *dummy = NULL; if (token == ~0) { apf_task_wake_all(); return; } again: raw_spin_lock(&b->lock); n = _find_apf_task(b, token); if (!n) { /* * Async #PF not yet handled, add a dummy entry for the token. * Allocating the token must be down outside of the raw lock * as the allocator is preemptible on PREEMPT_RT kernels. */ if (!dummy) { raw_spin_unlock(&b->lock); dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC); /* * Continue looping on allocation failure, eventually * the async #PF will be handled and allocating a new * node will be unnecessary. */ if (!dummy) cpu_relax(); /* * Recheck for async #PF completion before enqueueing * the dummy token to avoid duplicate list entries. */ goto again; } dummy->token = token; dummy->cpu = smp_processor_id(); dummy->dummy = true; init_swait_queue_head(&dummy->wq); hlist_add_head(&dummy->link, &b->list); dummy = NULL; } else { apf_task_wake_one(n); } raw_spin_unlock(&b->lock); /* A dummy token might be allocated and ultimately not used. 
*/ kfree(dummy); } noinstr u32 kvm_read_and_reset_apf_flags(void) { u32 flags = 0; if (__this_cpu_read(async_pf_enabled)) { flags = __this_cpu_read(apf_reason.flags); __this_cpu_write(apf_reason.flags, 0); } return flags; } EXPORT_SYMBOL_FOR_KVM(kvm_read_and_reset_apf_flags); noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) { u32 flags = kvm_read_and_reset_apf_flags(); irqentry_state_t state; if (!flags) return false; state = irqentry_enter(regs); instrumentation_begin(); /* * If the host managed to inject an async #PF into an interrupt * disabled region, then die hard as this is not going to end well * and the host side is seriously broken. */ if (unlikely(!(regs->flags & X86_EFLAGS_IF))) panic("Host injected async #PF in interrupt disabled region\n"); if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { if (unlikely(!(user_mode(regs)))) panic("Host injected async #PF in kernel mode\n"); /* Page is swapped out by the host. */ kvm_async_pf_task_wait_schedule(token); } else { WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags); } instrumentation_end(); irqentry_exit(regs, state); return true; } DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt) { struct pt_regs *old_regs = set_irq_regs(regs); u32 token; apic_eoi(); inc_irq_stat(irq_hv_callback_count); if (__this_cpu_read(async_pf_enabled)) { token = __this_cpu_read(apf_reason.token); kvm_async_pf_task_wake(token); __this_cpu_write(apf_reason.token, 0); wrmsrq(MSR_KVM_ASYNC_PF_ACK, 1); } set_irq_regs(old_regs); } static void __init paravirt_ops_setup(void) { pv_info.name = "KVM"; if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) pv_ops.cpu.io_delay = kvm_io_delay; #ifdef CONFIG_X86_IO_APIC no_timer_check = 1; #endif } static void kvm_register_steal_time(void) { int cpu = smp_processor_id(); struct kvm_steal_time *st = &per_cpu(steal_time, cpu); if (!has_steal_clock) return; wrmsrq(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); pr_debug("stealtime: cpu %d, msr %llx\n", cpu, (unsigned long long) slow_virt_to_phys(st)); } static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED; static notrace __maybe_unused void kvm_guest_apic_eoi_write(void) { /** * This relies on __test_and_clear_bit to modify the memory * in a way that is atomic with respect to the local CPU. * The hypervisor only accesses this memory from the local CPU so * there's no need for lock or memory barriers. * An optimization barrier is implied in apic write. */ if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) return; apic_native_eoi(); } static void kvm_guest_cpu_init(void) { if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) { u64 pa; WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled)); pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; wrmsrq(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); wrmsrq(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(async_pf_enabled, true); pr_debug("setup async PF for cpu %d\n", smp_processor_id()); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { unsigned long pa; /* Size alignment is implied but just to make it explicit. 
*/ BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); __this_cpu_write(kvm_apic_eoi, 0); pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) | KVM_MSR_ENABLED; wrmsrq(MSR_KVM_PV_EOI_EN, pa); } if (has_steal_clock) kvm_register_steal_time(); } static void kvm_pv_disable_apf(void) { if (!__this_cpu_read(async_pf_enabled)) return; wrmsrq(MSR_KVM_ASYNC_PF_EN, 0); __this_cpu_write(async_pf_enabled, false); pr_debug("disable async PF for cpu %d\n", smp_processor_id()); } static void kvm_disable_steal_time(void) { if (!has_steal_clock) return; wrmsrq(MSR_KVM_STEAL_TIME, 0); } static u64 kvm_steal_clock(int cpu) { u64 steal; struct kvm_steal_time *src; int version; src = &per_cpu(steal_time, cpu); do { version = src->version; virt_rmb(); steal = src->steal; virt_rmb(); } while ((version & 1) || (version != src->version)); return steal; } static inline __init void __set_percpu_decrypted(void *ptr, unsigned long size) { early_set_memory_decrypted((unsigned long) ptr, size); } /* * Iterate through all possible CPUs and map the memory region pointed * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once. * * Note: we iterate through all possible CPUs to ensure that CPUs * hotplugged will have their per-cpu variable already mapped as * decrypted. */ static void __init sev_map_percpu_data(void) { int cpu; if (cc_vendor != CC_VENDOR_AMD || !cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return; for_each_possible_cpu(cpu) { __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason)); __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time)); __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi)); } } static void kvm_guest_cpu_offline(bool shutdown) { kvm_disable_steal_time(); if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) wrmsrq(MSR_KVM_PV_EOI_EN, 0); if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) wrmsrq(MSR_KVM_MIGRATION_CONTROL, 0); kvm_pv_disable_apf(); if (!shutdown) apf_task_wake_all(); kvmclock_disable(); } static int kvm_cpu_online(unsigned int cpu) { unsigned long flags; local_irq_save(flags); kvm_guest_cpu_init(); local_irq_restore(flags); return 0; } #ifdef CONFIG_SMP static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); static bool pv_tlb_flush_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && !kvm_para_has_hint(KVM_HINTS_REALTIME) && kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && !boot_cpu_has(X86_FEATURE_MWAIT) && (num_possible_cpus() != 1)); } static bool pv_ipi_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) && (num_possible_cpus() != 1)); } static bool pv_sched_yield_supported(void) { return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && !kvm_para_has_hint(KVM_HINTS_REALTIME) && kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && !boot_cpu_has(X86_FEATURE_MWAIT) && (num_possible_cpus() != 1)); } #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) static void __send_ipi_mask(const struct cpumask *mask, int vector) { unsigned long flags; int cpu, min = 0, max = 0; #ifdef CONFIG_X86_64 __uint128_t ipi_bitmap = 0; #else u64 ipi_bitmap = 0; #endif u32 apic_id, icr; long ret; if (cpumask_empty(mask)) return; local_irq_save(flags); switch (vector) { default: icr = APIC_DM_FIXED | vector; break; case NMI_VECTOR: icr = APIC_DM_NMI; break; } for_each_cpu(cpu, mask) { apic_id = per_cpu(x86_cpu_to_apicid, cpu); if (!ipi_bitmap) { min = max = apic_id; } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { ipi_bitmap <<= min - apic_id; min = apic_id; } else if (apic_id > min && apic_id < min 
+ KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? max : apic_id; } else { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", ret); min = max = apic_id; ipi_bitmap = 0; } __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); } if (ipi_bitmap) { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", ret); } local_irq_restore(flags); } static void kvm_send_ipi_mask(const struct cpumask *mask, int vector) { __send_ipi_mask(mask, vector); } static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) { unsigned int this_cpu = smp_processor_id(); struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); const struct cpumask *local_mask; cpumask_copy(new_mask, mask); cpumask_clear_cpu(this_cpu, new_mask); local_mask = new_mask; __send_ipi_mask(local_mask, vector); } static int __init setup_efi_kvm_sev_migration(void) { efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled"; efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID; efi_status_t status; unsigned long size; bool enabled; if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) || !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) return 0; if (!efi_enabled(EFI_BOOT)) return 0; if (!efi_enabled(EFI_RUNTIME_SERVICES)) { pr_info("%s : EFI runtime services are not enabled\n", __func__); return 0; } size = sizeof(enabled); /* Get variable contents into buffer */ status = efi.get_variable(efi_sev_live_migration_enabled, &efi_variable_guid, NULL, &size, &enabled); if (status == EFI_NOT_FOUND) { pr_info("%s : EFI live migration variable not found\n", __func__); return 0; } if (status != EFI_SUCCESS) { pr_info("%s : EFI variable retrieval failed\n", __func__); return 0; } if (enabled == 0) { pr_info("%s: live migration disabled in EFI\n", __func__); return 0; } pr_info("%s : live migration enabled in EFI\n", __func__); wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); return 1; } late_initcall(setup_efi_kvm_sev_migration); /* * Set the IPI entry points */ static __init void kvm_setup_pv_ipi(void) { apic_update_callback(send_IPI_mask, kvm_send_ipi_mask); apic_update_callback(send_IPI_mask_allbutself, kvm_send_ipi_mask_allbutself); pr_info("setup PV IPIs\n"); } static void kvm_smp_send_call_func_ipi(const struct cpumask *mask) { int cpu; native_send_call_func_ipi(mask); /* Make sure other vCPUs get a chance to run if they need to. */ for_each_cpu(cpu, mask) { if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) { kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu)); break; } } } static void kvm_flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info) { u8 state; int cpu; struct kvm_steal_time *src; struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); cpumask_copy(flushmask, cpumask); /* * We have to call flush only on online vCPUs. And * queue flush_on_enter for pre-empted vCPUs */ for_each_cpu(cpu, flushmask) { /* * The local vCPU is never preempted, so we do not explicitly * skip check for local vCPU - it will never be cleared from * flushmask. 
*/ src = &per_cpu(steal_time, cpu); state = READ_ONCE(src->preempted); if ((state & KVM_VCPU_PREEMPTED)) { if (try_cmpxchg(&src->preempted, &state, state | KVM_VCPU_FLUSH_TLB)) __cpumask_clear_cpu(cpu, flushmask); } } native_flush_tlb_multi(flushmask, info); } static __init int kvm_alloc_cpumask(void) { int cpu; if (!kvm_para_available() || nopv) return 0; if (pv_tlb_flush_supported() || pv_ipi_supported()) for_each_possible_cpu(cpu) { zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), GFP_KERNEL, cpu_to_node(cpu)); } return 0; } arch_initcall(kvm_alloc_cpumask); static void __init kvm_smp_prepare_boot_cpu(void) { /* * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() * shares the guest physical address with the hypervisor. */ sev_map_percpu_data(); kvm_guest_cpu_init(); native_smp_prepare_boot_cpu(); kvm_spinlock_init(); } static int kvm_cpu_down_prepare(unsigned int cpu) { unsigned long flags; local_irq_save(flags); kvm_guest_cpu_offline(false); local_irq_restore(flags); return 0; } #endif static int kvm_suspend(void *data) { u64 val = 0; kvm_guest_cpu_offline(false); #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) rdmsrq(MSR_KVM_POLL_CONTROL, val); has_guest_poll = !(val & 1); #endif return 0; } static void kvm_resume(void *data) { kvm_cpu_online(raw_smp_processor_id()); #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) wrmsrq(MSR_KVM_POLL_CONTROL, 0); #endif } static const struct syscore_ops kvm_syscore_ops = { .suspend = kvm_suspend, .resume = kvm_resume, }; static struct syscore kvm_syscore = { .ops = &kvm_syscore_ops, }; static void kvm_pv_guest_cpu_reboot(void *unused) { kvm_guest_cpu_offline(true); } static int kvm_pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { if (code == SYS_RESTART) on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1); return NOTIFY_DONE; } static struct notifier_block kvm_pv_reboot_nb = { .notifier_call = kvm_pv_reboot_notify, }; /* * After a PV feature is registered, the host will keep writing to the * registered memory location. If the guest happens to shutdown, this memory * won't be valid. In cases like kexec, in which you install a new kernel, this * means a random memory location will be kept being written. */ #ifdef CONFIG_CRASH_DUMP static void kvm_crash_shutdown(struct pt_regs *regs) { kvm_guest_cpu_offline(true); native_machine_crash_shutdown(regs); } #endif #if defined(CONFIG_X86_32) || !defined(CONFIG_SMP) bool __kvm_vcpu_is_preempted(long cpu); __visible bool __kvm_vcpu_is_preempted(long cpu) { struct kvm_steal_time *src = &per_cpu(steal_time, cpu); return !!(src->preempted & KVM_VCPU_PREEMPTED); } PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted); #else #include <asm/asm-offsets.h> extern bool __raw_callee_save___kvm_vcpu_is_preempted(long); /* * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and * restoring to/from the stack. 
*/ #define PV_VCPU_PREEMPTED_ASM \ "movq __per_cpu_offset(,%rdi,8), %rax\n\t" \ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \ "setne %al\n\t" DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted, PV_VCPU_PREEMPTED_ASM, .text); #endif static void __init kvm_guest_init(void) { int i; paravirt_ops_setup(); register_reboot_notifier(&kvm_pv_reboot_nb); for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) raw_spin_lock_init(&async_pf_sleepers[i].lock); if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { has_steal_clock = 1; static_call_update(pv_steal_clock, kvm_steal_clock); pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) apic_update_callback(eoi, kvm_guest_apic_eoi_write); if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) { static_branch_enable(&kvm_async_pf_enabled); sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_kvm_asyncpf_interrupt); } #ifdef CONFIG_SMP if (pv_tlb_flush_supported()) { pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi; pr_info("KVM setup pv remote TLB flush\n"); } smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; if (pv_sched_yield_supported()) { smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi; pr_info("setup PV sched yield\n"); } if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online", kvm_cpu_online, kvm_cpu_down_prepare) < 0) pr_err("failed to install cpu hotplug callbacks\n"); #else sev_map_percpu_data(); kvm_guest_cpu_init(); #endif #ifdef CONFIG_CRASH_DUMP machine_ops.crash_shutdown = kvm_crash_shutdown; #endif register_syscore(&kvm_syscore); /* * Hard lockup detection is enabled by default. Disable it, as guests * can get false positives too easily, for example if the host is * overcommitted. */ hardlockup_detector_disable(); } static noinline uint32_t __kvm_cpuid_base(void) { if (boot_cpu_data.cpuid_level < 0) return 0; /* So we don't blow up on old processors */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return cpuid_base_hypervisor(KVM_SIGNATURE, 0); return 0; } static inline uint32_t kvm_cpuid_base(void) { static int kvm_cpuid_base = -1; if (kvm_cpuid_base == -1) kvm_cpuid_base = __kvm_cpuid_base(); return kvm_cpuid_base; } bool kvm_para_available(void) { return kvm_cpuid_base() != 0; } EXPORT_SYMBOL_GPL(kvm_para_available); unsigned int kvm_arch_para_features(void) { return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES); } unsigned int kvm_arch_para_hints(void) { return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES); } EXPORT_SYMBOL_GPL(kvm_arch_para_hints); static uint32_t __init kvm_detect(void) { return kvm_cpuid_base(); } static void __init kvm_apic_init(void) { #ifdef CONFIG_SMP if (pv_ipi_supported()) kvm_setup_pv_ipi(); #endif } static bool __init kvm_msi_ext_dest_id(void) { return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID); } static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) { kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages, KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); } static void __init kvm_init_platform(void) { u64 tolud = PFN_PHYS(e820__end_of_low_ram_pfn()); /* * Note, hardware requires variable MTRR ranges to be power-of-2 sized * and naturally aligned. But when forcing guest MTRR state, Linux * doesn't program the forced ranges into hardware. Don't bother doing * the math to generate a technically-legal range. 
*/ struct mtrr_var_range pci_hole = { .base_lo = tolud | X86_MEMTYPE_UC, .mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V, .mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32, }; if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) { unsigned long nr_pages; int i; pv_ops.mmu.notify_page_enc_status_changed = kvm_sev_hc_page_enc_status; /* * Reset the host's shared pages list related to kernel * specific page encryption status settings before we load a * new kernel by kexec. Reset the page encryption status * during early boot instead of just before kexec to avoid SMP * races during kvm_pv_guest_cpu_reboot(). * NOTE: We cannot reset the complete shared pages list * here as we need to retain the UEFI/OVMF firmware * specific settings. */ for (i = 0; i < e820_table->nr_entries; i++) { struct e820_entry *entry = &e820_table->entries[i]; if (entry->type != E820_TYPE_RAM) continue; nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE); kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr, nr_pages, KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); } /* * Ensure that _bss_decrypted section is marked as decrypted in the * shared pages list. */ early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted, __end_bss_decrypted - __start_bss_decrypted, 0); /* * If not booted using EFI, enable Live migration support. */ if (!efi_enabled(EFI_BOOT)) wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); } kvmclock_init(); x86_platform.apic_post_init = kvm_apic_init; /* * Set WB as the default cache mode for SEV-SNP and TDX, with a single * UC range for the legacy PCI hole, e.g. so that devices that expect * to get UC/WC mappings don't get surprised with WB. */ guest_force_mtrr_state(&pci_hole, 1, MTRR_TYPE_WRBACK); } #if defined(CONFIG_AMD_MEM_ENCRYPT) static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs) { /* RAX and CPL are already in the GHCB */ ghcb_set_rbx(ghcb, regs->bx); ghcb_set_rcx(ghcb, regs->cx); ghcb_set_rdx(ghcb, regs->dx); ghcb_set_rsi(ghcb, regs->si); } static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) { /* No checking of the return state needed */ return true; } #endif const __initconst struct hypervisor_x86 x86_hyper_kvm = { .name = "KVM", .detect = kvm_detect, .type = X86_HYPER_KVM, .init.guest_late_init = kvm_guest_init, .init.x2apic_available = kvm_para_available, .init.msi_ext_dest_id = kvm_msi_ext_dest_id, .init.init_platform = kvm_init_platform, #if defined(CONFIG_AMD_MEM_ENCRYPT) .runtime.sev_es_hcall_prepare = kvm_sev_es_hcall_prepare, .runtime.sev_es_hcall_finish = kvm_sev_es_hcall_finish, #endif }; static __init int activate_jump_labels(void) { if (has_steal_clock) { static_key_slow_inc(¶virt_steal_enabled); if (steal_acc) static_key_slow_inc(¶virt_steal_rq_enabled); } return 0; } arch_initcall(activate_jump_labels); #ifdef CONFIG_PARAVIRT_SPINLOCKS /* Kick a cpu by its apicid. Used to wake up a halted vcpu */ static void kvm_kick_cpu(int cpu) { unsigned long flags = 0; u32 apicid; apicid = per_cpu(x86_cpu_to_apicid, cpu); kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid); } #include <asm/qspinlock.h> static void kvm_wait(u8 *ptr, u8 val) { if (in_nmi()) return; /* * halt until it's our turn and kicked. Note that we do safe halt * for irq enabled case to avoid hang when lock info is overwritten * in irq spinlock slowpath and no spurious interrupt occur to save us. 
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		/* safe_halt() will enable IRQ */
		if (READ_ONCE(*ptr) == val)
			safe_halt();
		else
			local_irq_enable();
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrq(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrq(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enable guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disable guest halt poll enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif
// SPDX-License-Identifier: GPL-2.0+
/*
 * USB FTDI SIO driver
 *
 * Copyright (C) 2009 - 2013
 *     Johan Hovold (jhovold@gmail.com)
 * Copyright (C) 1999 - 2001
 *     Greg Kroah-Hartman (greg@kroah.com)
 *     Bill Ryder (bryder@sgi.com)
 * Copyright (C) 2002
 *     Kuba Ober (kuba@mareimbrium.org)
 *
 * See Documentation/usb/usb-serial.rst for more information on using this
 * driver
 *
 * See http://ftdi-usb-sio.sourceforge.net for up to date testing info
 *	and extra documentation
 *
 * Change entries from 2004 and earlier can be found in versions of this
 * file in kernel versions prior to the 2.6.24 release.
* */ /* Bill Ryder - bryder@sgi.com - wrote the FTDI_SIO implementation */ /* Thanx to FTDI for so kindly providing details of the protocol required */ /* to talk to the device */ /* Thanx to gkh and the rest of the usb dev group for all code I have assimilated :-) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/serial.h> #include <linux/gpio/driver.h> #include <linux/usb/serial.h> #include "ftdi_sio.h" #include "ftdi_sio_ids.h" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>" #define DRIVER_DESC "USB FTDI Serial Converters Driver" enum ftdi_chip_type { SIO, FT232A, FT232B, FT2232C, FT232R, FT232H, FT2232H, FT4232H, FT4232HA, FT232HP, FT233HP, FT2232HP, FT2233HP, FT4232HP, FT4233HP, FTX, }; struct ftdi_private { enum ftdi_chip_type chip_type; int baud_base; /* baud base clock for divisor setting */ int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */ u16 last_set_data_value; /* the last data state set - needed for doing * a break */ int flags; /* some ASYNC_xxxx flags are supported */ unsigned long last_dtr_rts; /* saved modem control outputs */ char prev_status; /* Used for TIOCMIWAIT */ char transmit_empty; /* If transmitter is empty or not */ u16 channel; /* channel index, or 0 for legacy types */ speed_t force_baud; /* if non-zero, force the baud rate to this value */ int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */ unsigned int latency; /* latency setting in use */ unsigned short max_packet_size; struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */ #ifdef CONFIG_GPIOLIB struct gpio_chip gc; struct mutex gpio_lock; /* protects GPIO state */ bool gpio_registered; /* is the gpiochip in kernel registered */ bool gpio_used; /* true if the user requested a gpio */ u8 gpio_altfunc; /* which pins are in gpio mode */ u8 gpio_output; /* pin directions cache */ u8 gpio_value; /* pin value for outputs */ #endif }; struct ftdi_quirk { int (*probe)(struct usb_serial *); /* Special settings for probed ports. */ void (*port_probe)(struct ftdi_private *); }; static int ftdi_jtag_probe(struct usb_serial *serial); static int ftdi_stmclite_probe(struct usb_serial *serial); static int ftdi_8u2232c_probe(struct usb_serial *serial); static void ftdi_usb_uirt_setup(struct ftdi_private *priv); static void ftdi_he_tira1_setup(struct ftdi_private *priv); static const struct ftdi_quirk ftdi_jtag_quirk = { .probe = ftdi_jtag_probe, }; static const struct ftdi_quirk ftdi_ndi_quirk = { }; static const struct ftdi_quirk ftdi_usb_uirt_quirk = { .port_probe = ftdi_usb_uirt_setup, }; static const struct ftdi_quirk ftdi_he_tira1_quirk = { .port_probe = ftdi_he_tira1_setup, }; static const struct ftdi_quirk ftdi_stmclite_quirk = { .probe = ftdi_stmclite_probe, }; static const struct ftdi_quirk ftdi_8u2232c_quirk = { .probe = ftdi_8u2232c_probe, }; /* * The 8U232AM has the same API as the sio except for: * - it can support MUCH higher baudrates; up to: * o 921600 for RS232 and 2000000 for RS422/485 at 48MHz * o 230400 at 12MHz * so .. 8U232AM's baudrate setting codes are different * - it has a two byte status code. 
* - it returns characters every 16ms (the FTDI does it every 40ms) * * the bcdDevice value is used to differentiate FT232BM and FT245BM from * the earlier FT8U232AM and FT8U232BM. For now, include all known VID/PID * combinations in both tables. * FIXME: perhaps bcdDevice can also identify 12MHz FT8U232AM devices, * but I don't know if those ever went into mass production. [Ian Abbott] */ /* * Device ID not listed? Test it using * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report. */ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DMX4ALL) }, { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) , .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk }, { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT2233HP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT4233HP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT2232HP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT4232HP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT233HP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT232HP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FT4232HA_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) }, { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_633_PID) }, { USB_DEVICE(FTDI_VID, 
FTDI_XF_631_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_635_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) }, { USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) }, { USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0103_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0104_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0105_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0106_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0107_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0108_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0109_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0110_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0111_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0112_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0113_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0114_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0115_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0116_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0117_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0118_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0119_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0120_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0121_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0122_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0123_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0124_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0125_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0126_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0127_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0128_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0129_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012F_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_0130_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0131_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0132_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0133_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0134_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0135_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0136_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0137_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0138_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0139_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0140_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0141_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0142_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0143_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0144_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0145_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0146_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0147_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0148_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0149_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0150_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0151_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0152_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0153_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0154_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0155_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0156_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0157_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0158_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0159_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0160_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0161_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0162_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0163_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0164_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0165_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0166_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0167_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0168_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0169_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016E_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_016F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0170_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0171_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0172_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0173_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0174_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0175_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0176_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0177_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0178_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0179_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0180_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0181_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0182_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0183_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0184_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0185_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0186_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0187_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0188_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0189_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0190_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0191_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0192_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0193_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0194_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0195_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0196_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0197_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0198_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0199_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AD_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_01AE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EC_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_01ED_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USBX_707_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2104_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2106_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_2_PID) }, { USB_DEVICE(SEALEVEL_VID, 
SEALEVEL_2203_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_5_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_8_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_5_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_8_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_5_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) }, { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, { USB_DEVICE(OCT_VID, OCT_US101_PID) }, { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), .driver_info = (kernel_ulong_t)&ftdi_he_tira1_quirk }, { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), .driver_info = (kernel_ulong_t)&ftdi_usb_uirt_quirk }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_1) }, { USB_DEVICE(FTDI_VID, PROTEGO_R2X0) }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_3) }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_4) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E808_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E809_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80A_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80C_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80D_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80E_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E888_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E889_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88A_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88C_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88D_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88E_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UO100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UM100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UR100_PID) }, { 
USB_DEVICE(FTDI_VID, FTDI_ELV_ALC8500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PYRAMID_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1000PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_US485_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PICPRO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PCMCIA_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PK1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_RS232MON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_TIAO_UMPA_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_NT_ORIONLXM_PID, 1) }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) }, /* * ELV devices: */ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UDF77_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UIO88_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UAD8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UDA7_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USI2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_T1100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_PCD200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_ULA200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_CSI8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1000DL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_1_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSLOAD_N_GO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU64_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSPRIME8_5_PID) }, { USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) }, { USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) }, { USB_DEVICE(INTREPID_VID, 
INTREPID_NEOVI_PID) }, { USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) }, { USB_DEVICE(FALCOM_VID, FALCOM_SAMBA_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) }, { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) }, { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) }, { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_3_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_4_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_YS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_IC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_DB9_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_RS232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y9_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_VCP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_D2XX_PID) }, { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HRC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16IC_PID) }, { USB_DEVICE(KOBIL_VID, KOBIL_CONV_B1_PID) }, { USB_DEVICE(KOBIL_VID, KOBIL_CONV_KAAN_PID) }, { USB_DEVICE(POSIFLEX_VID, POSIFLEX_PP7000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TTUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ECLO_COM_1WIRE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_777_PID) }, { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_8900F_PID) }, { 
USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) }, { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID), .driver_info = (kernel_ulong_t)&ftdi_ndi_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_SPECTRA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_ndi_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_2_PID), .driver_info = (kernel_ulong_t)&ftdi_ndi_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_3_PID), .driver_info = (kernel_ulong_t)&ftdi_ndi_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_ndi_quirk }, { USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID), .driver_info = (kernel_ulong_t)&ftdi_ndi_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, 
TML_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, CYBER_CORTEX_AV_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FIC_VID, FIC_NEO1973_DEBUG_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_OOCDLINK_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID, 1) }, { USB_DEVICE(FTDI_VID, FTDI_AXE027_PID) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_TURTELIZER_PID, 1) }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, /* Papouch devices based on FTDI chip */ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, { USB_DEVICE(ATMEL_VID, STK541_PID) }, { USB_DEVICE(DE_VID, STB_PID) }, { USB_DEVICE(DE_VID, WHT_PID) }, { USB_DEVICE_INTERFACE_NUMBER(ADI_VID, ADI_GNICE_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ADI_VID, ADI_GNICEPLUS_PID, 1) }, { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, USB_CLASS_VENDOR_SPEC, USB_SUBCLASS_VENDOR_SPEC, 0x00) }, { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, { USB_DEVICE_INTERFACE_NUMBER(MARVELL_VID, MARVELL_SHEEVAPLUG_PID, 1) }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, { USB_DEVICE(FTDI_VID, PI_C865_PID) }, { USB_DEVICE(FTDI_VID, PI_C857_PID) }, { USB_DEVICE(PI_VID, PI_C866_PID) }, { 
USB_DEVICE(PI_VID, PI_C663_PID) }, { USB_DEVICE(PI_VID, PI_C725_PID) }, { USB_DEVICE(PI_VID, PI_E517_PID) }, { USB_DEVICE(PI_VID, PI_C863_PID) }, { USB_DEVICE(PI_VID, PI_E861_PID) }, { USB_DEVICE(PI_VID, PI_C867_PID) }, { USB_DEVICE(PI_VID, PI_E609_PID) }, { USB_DEVICE(PI_VID, PI_E709_PID) }, { USB_DEVICE(PI_VID, PI_100F_PID) }, { USB_DEVICE(PI_VID, PI_1011_PID) }, { USB_DEVICE(PI_VID, PI_1012_PID) }, { USB_DEVICE(PI_VID, PI_1013_PID) }, { USB_DEVICE(PI_VID, PI_1014_PID) }, { USB_DEVICE(PI_VID, PI_1015_PID) }, { USB_DEVICE(PI_VID, PI_1016_PID) }, { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, MARVELL_OPENRD_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, TI_XDS100V2_PID, 1) }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, XVERVE_SIGNALYZER_ST_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID, 1) }, { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) }, { USB_DEVICE_INTERFACE_NUMBER(IONICS_VID, IONICS_PLUGCOMPUTER_PID, 1) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE_INTERFACE_NUMBER(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ST_VID, ST_STMCLT_2232_PID, 1) }, { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID), .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID, 1) }, { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, /* Crucible Devices */ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, /* Cressi Devices */ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) }, /* Brainboxes Devices */ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) }, { 
USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, /* ekey Devices */ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, /* Infineon Devices */ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) }, /* GE Healthcare devices */ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, /* Active Research (Actisense) devices */ { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, /* Belimo Automation devices */ { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) }, { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) }, /* ICP DAS I-756xU devices */ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, { USB_DEVICE_INTERFACE_NUMBER(TI_VID, TI_CC3200_LAUNCHPAD_PID, 1) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, /* EZPrototypes devices */ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) }, { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) }, /* Sienna devices */ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, /* IDS GmbH devices */ { USB_DEVICE(IDS_VID, IDS_SI31A_PID) }, { USB_DEVICE(IDS_VID, IDS_CM31A_PID) }, /* Omron devices */ { USB_DEVICE(OMRON_VID, 
OMRON_CS1W_CIF31_PID) }, /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, { USB_DEVICE_INTERFACE_NUMBER(UBLOX_VID, UBLOX_EVK_M101_PID, 2) }, /* FreeCalypso USB adapters */ { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID, 1) }, /* GMC devices */ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, /* Altera USB Blaster 3 */ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) }, { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) }, /* Abacus Electrics */ { USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); static const char *ftdi_chip_name[] = { [SIO] = "SIO", /* the serial part of FT8U100AX */ [FT232A] = "FT232A", [FT232B] = "FT232B", [FT2232C] = "FT2232C/D", [FT232R] = "FT232R", [FT232H] = "FT232H", [FT2232H] = "FT2232H", [FT4232H] = "FT4232H", [FT4232HA] = "FT4232HA", [FT232HP] = "FT232HP", [FT233HP] = "FT233HP", [FT2232HP] = "FT2232HP", [FT2233HP] = "FT2233HP", [FT4232HP] = "FT4232HP", [FT4233HP] = "FT4233HP", [FTX] = "FT-X", }; /* Used for TIOCMIWAIT */ #define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD) #define FTDI_STATUS_B1_MASK (FTDI_RS_BI) /* End TIOCMIWAIT */ static void ftdi_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int ftdi_get_modem_status(struct usb_serial_port *port, unsigned char status[2]); #define WDR_TIMEOUT 5000 /* default urb timeout */ #define WDR_SHORT_TIMEOUT 1000 /* shorter urb timeout */ /* * *************************************************************************** * Utility functions * *************************************************************************** */ static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base) { unsigned short int divisor; /* divisor shifted 3 bits to the left */ int divisor3 = DIV_ROUND_CLOSEST(base, 2 * baud); if ((divisor3 & 0x7) == 7) divisor3++; /* round x.7/8 up to x+1 */ divisor = divisor3 >> 3; divisor3 &= 0x7; if (divisor3 == 1) divisor |= 0xc000; /* +0.125 */ else if (divisor3 >= 4) divisor |= 0x4000; /* +0.5 */ else if (divisor3 != 0) divisor |= 0x8000; /* +0.25 */ else if (divisor == 1) divisor = 0; /* special case for maximum baud rate */ return divisor; } static unsigned short int ftdi_232am_baud_to_divisor(int baud) { return ftdi_232am_baud_base_to_divisor(baud, 48000000); } static u32 ftdi_232bm_baud_base_to_divisor(int baud, int base) { static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 }; u32 divisor; /* divisor shifted 3 bits to the left */ int divisor3 = DIV_ROUND_CLOSEST(base, 2 * 
baud); divisor = divisor3 >> 3; divisor |= (u32)divfrac[divisor3 & 0x7] << 14; /* Deal with special cases for highest baud rates. */ if (divisor == 1) /* 1.0 */ divisor = 0; else if (divisor == 0x4001) /* 1.5 */ divisor = 1; return divisor; } static u32 ftdi_232bm_baud_to_divisor(int baud) { return ftdi_232bm_baud_base_to_divisor(baud, 48000000); } static u32 ftdi_2232h_baud_base_to_divisor(int baud, int base) { static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 }; u32 divisor; int divisor3; /* hi-speed baud rate is 10-bit sampling instead of 16-bit */ divisor3 = DIV_ROUND_CLOSEST(8 * base, 10 * baud); divisor = divisor3 >> 3; divisor |= (u32)divfrac[divisor3 & 0x7] << 14; /* Deal with special cases for highest baud rates. */ if (divisor == 1) /* 1.0 */ divisor = 0; else if (divisor == 0x4001) /* 1.5 */ divisor = 1; /* * Set this bit to turn off a divide by 2.5 on baud rate generator * This enables baud rates up to 12Mbaud but cannot reach below 1200 * baud with this bit set */ divisor |= 0x00020000; return divisor; } static u32 ftdi_2232h_baud_to_divisor(int baud) { return ftdi_2232h_baud_base_to_divisor(baud, 120000000); } #define set_mctrl(port, set) update_mctrl((port), (set), 0) #define clear_mctrl(port, clear) update_mctrl((port), 0, (clear)) static int update_mctrl(struct usb_serial_port *port, unsigned int set, unsigned int clear) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; unsigned value; int rv; if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) { dev_dbg(dev, "%s - DTR|RTS not being set|cleared\n", __func__); return 0; /* no change */ } clear &= ~set; /* 'set' takes precedence over 'clear' */ value = 0; if (clear & TIOCM_DTR) value |= FTDI_SIO_SET_DTR_LOW; if (clear & TIOCM_RTS) value |= FTDI_SIO_SET_RTS_LOW; if (set & TIOCM_DTR) value |= FTDI_SIO_SET_DTR_HIGH; if (set & TIOCM_RTS) value |= FTDI_SIO_SET_RTS_HIGH; rv = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_MODEM_CTRL_REQUEST, FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE, value, priv->channel, NULL, 0, WDR_TIMEOUT); if (rv < 0) { dev_dbg(dev, "%s Error from MODEM_CTRL urb: DTR %s, RTS %s\n", __func__, (set & TIOCM_DTR) ? "HIGH" : (clear & TIOCM_DTR) ? "LOW" : "unchanged", (set & TIOCM_RTS) ? "HIGH" : (clear & TIOCM_RTS) ? "LOW" : "unchanged"); rv = usb_translate_errors(rv); } else { dev_dbg(dev, "%s - DTR %s, RTS %s\n", __func__, (set & TIOCM_DTR) ? "HIGH" : (clear & TIOCM_DTR) ? "LOW" : "unchanged", (set & TIOCM_RTS) ? "HIGH" : (clear & TIOCM_RTS) ? "LOW" : "unchanged"); /* FIXME: locking on last_dtr_rts */ priv->last_dtr_rts = (priv->last_dtr_rts & ~clear) | set; } return rv; } static u32 get_ftdi_divisor(struct tty_struct *tty, struct usb_serial_port *port) { const struct ftdi_quirk *quirk = usb_get_serial_data(port->serial); struct ftdi_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; u32 div_value = 0; int div_okay = 1; int baud; baud = tty_get_baud_rate(tty); dev_dbg(dev, "%s - tty_get_baud_rate reports speed %d\n", __func__, baud); /* * Observe deprecated async-compatible custom_divisor hack, update * baudrate if needed. 
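 * For example (numbers purely illustrative): with ASYNC_SPD_CUST selected,
 * e.g. via setserial, a baud_base of 24000000 and a custom_divisor of 2500,
 * a userspace request for 38400 baud is remapped below to
 * 24000000 / 2500 = 9600 baud before the divisor is computed.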
*/ if (baud == 38400 && ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && (priv->custom_divisor)) { baud = priv->baud_base / priv->custom_divisor; dev_dbg(dev, "%s - custom divisor %d sets baud rate to %d\n", __func__, priv->custom_divisor, baud); } if (!baud) baud = 9600; switch (priv->chip_type) { case SIO: switch (baud) { case 300: div_value = ftdi_sio_b300; break; case 600: div_value = ftdi_sio_b600; break; case 1200: div_value = ftdi_sio_b1200; break; case 2400: div_value = ftdi_sio_b2400; break; case 4800: div_value = ftdi_sio_b4800; break; case 9600: div_value = ftdi_sio_b9600; break; case 19200: div_value = ftdi_sio_b19200; break; case 38400: div_value = ftdi_sio_b38400; break; case 57600: div_value = ftdi_sio_b57600; break; case 115200: div_value = ftdi_sio_b115200; break; default: dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n", __func__, baud); div_value = ftdi_sio_b9600; baud = 9600; div_okay = 0; } break; case FT232A: if (baud <= 3000000) { div_value = ftdi_232am_baud_to_divisor(baud); } else { dev_dbg(dev, "%s - Baud rate too high!\n", __func__); baud = 9600; div_value = ftdi_232am_baud_to_divisor(9600); div_okay = 0; } break; case FT232B: case FT2232C: case FT232R: case FTX: if (baud <= 3000000) { if (quirk == &ftdi_ndi_quirk && baud == 19200) baud = 1200000; div_value = ftdi_232bm_baud_to_divisor(baud); } else { dev_dbg(dev, "%s - Baud rate too high!\n", __func__); div_value = ftdi_232bm_baud_to_divisor(9600); div_okay = 0; baud = 9600; } break; default: if ((baud <= 12000000) && (baud >= 1200)) { div_value = ftdi_2232h_baud_to_divisor(baud); } else if (baud < 1200) { div_value = ftdi_232bm_baud_to_divisor(baud); } else { dev_dbg(dev, "%s - Baud rate too high!\n", __func__); div_value = ftdi_232bm_baud_to_divisor(9600); div_okay = 0; baud = 9600; } break; } if (div_okay) { dev_dbg(dev, "%s - Baud rate set to %d (divisor 0x%lX) on chip %s\n", __func__, baud, (unsigned long)div_value, ftdi_chip_name[priv->chip_type]); } tty_encode_baud_rate(tty, baud, baud); return div_value; } static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); u16 value; u16 index; u32 index_value; int rv; index_value = get_ftdi_divisor(tty, port); value = (u16)index_value; index = (u16)(index_value >> 16); if (priv->channel) index = (u16)((index << 8) | priv->channel); rv = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_BAUDRATE_REQUEST, FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE, value, index, NULL, 0, WDR_SHORT_TIMEOUT); return rv; } static int write_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; int rv; int l = priv->latency; if (priv->chip_type == SIO || priv->chip_type == FT232A) return -EINVAL; if (priv->flags & ASYNC_LOW_LATENCY) l = 1; dev_dbg(&port->dev, "%s: setting latency timer = %i\n", __func__, l); rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), FTDI_SIO_SET_LATENCY_TIMER_REQUEST, FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, l, priv->channel, NULL, 0, WDR_TIMEOUT); if (rv < 0) dev_err(&port->dev, "Unable to write latency timer: %i\n", rv); return rv; } static int _read_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; u8 buf; int rv; rv = usb_control_msg_recv(udev, 0, FTDI_SIO_GET_LATENCY_TIMER_REQUEST, FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0, 
priv->channel, &buf, 1, WDR_TIMEOUT, GFP_KERNEL); if (rv == 0) rv = buf; return rv; } static int read_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); int rv; if (priv->chip_type == SIO || priv->chip_type == FT232A) return -EINVAL; rv = _read_latency_timer(port); if (rv < 0) { dev_err(&port->dev, "Unable to read latency timer: %i\n", rv); return rv; } priv->latency = rv; return 0; } static void get_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); mutex_lock(&priv->cfg_lock); ss->flags = priv->flags; ss->baud_base = priv->baud_base; ss->custom_divisor = priv->custom_divisor; mutex_unlock(&priv->cfg_lock); } static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); int old_flags, old_divisor; mutex_lock(&priv->cfg_lock); if (!capable(CAP_SYS_ADMIN)) { if ((ss->flags ^ priv->flags) & ~ASYNC_USR_MASK) { mutex_unlock(&priv->cfg_lock); return -EPERM; } } old_flags = priv->flags; old_divisor = priv->custom_divisor; priv->flags = ss->flags & ASYNC_FLAGS; priv->custom_divisor = ss->custom_divisor; write_latency_timer(port); if ((priv->flags ^ old_flags) & ASYNC_SPD_MASK || ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST && priv->custom_divisor != old_divisor)) { /* warn about deprecation unless clearing */ if (priv->flags & ASYNC_SPD_MASK) dev_warn_ratelimited(&port->dev, "use of SPD flags is deprecated\n"); change_speed(tty, port); } mutex_unlock(&priv->cfg_lock); return 0; } static int get_lsr_info(struct usb_serial_port *port, unsigned int __user *retinfo) { struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned int result = 0; if (priv->transmit_empty) result = TIOCSER_TEMT; if (copy_to_user(retinfo, &result, sizeof(unsigned int))) return -EFAULT; return 0; } static int ftdi_determine_type(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; struct usb_device *udev = serial->dev; unsigned int version, ifnum; version = le16_to_cpu(udev->descriptor.bcdDevice); ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; /* Assume Hi-Speed type */ priv->baud_base = 120000000 / 2; priv->channel = CHANNEL_A + ifnum; switch (version) { case 0x200: priv->chip_type = FT232A; priv->baud_base = 48000000 / 2; priv->channel = 0; /* * FT232B devices have a bug where bcdDevice gets set to 0x200 * when iSerialNumber is 0. Assume it is an FT232B in case the * latency timer is readable. 
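 * (The FT232A has no latency timer register, so a successful
 * _read_latency_timer() on a device reporting bcdDevice 0x200 with no
 * serial number string points to a misreporting FT232B rather than a
 * genuine FT232A.)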
*/ if (udev->descriptor.iSerialNumber == 0 && _read_latency_timer(port) >= 0) { priv->chip_type = FT232B; } break; case 0x400: priv->chip_type = FT232B; priv->baud_base = 48000000 / 2; priv->channel = 0; break; case 0x500: priv->chip_type = FT2232C; priv->baud_base = 48000000 / 2; break; case 0x600: priv->chip_type = FT232R; priv->baud_base = 48000000 / 2; priv->channel = 0; break; case 0x700: priv->chip_type = FT2232H; break; case 0x800: priv->chip_type = FT4232H; break; case 0x900: priv->chip_type = FT232H; break; case 0x1000: priv->chip_type = FTX; priv->baud_base = 48000000 / 2; break; case 0x2800: priv->chip_type = FT2233HP; break; case 0x2900: priv->chip_type = FT4233HP; break; case 0x3000: priv->chip_type = FT2232HP; break; case 0x3100: priv->chip_type = FT4232HP; break; case 0x3200: priv->chip_type = FT233HP; break; case 0x3300: priv->chip_type = FT232HP; break; case 0x3600: priv->chip_type = FT4232HA; break; default: if (version < 0x200) { priv->chip_type = SIO; priv->baud_base = 12000000 / 16; priv->channel = 0; } else { dev_err(&port->dev, "unknown device type: 0x%02x\n", version); return -ENODEV; } } dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); return 0; } /* * Determine the maximum packet size for the device. This depends on the chip * type and the USB host capabilities. The value should be obtained from the * device descriptor as the chip will use the appropriate values for the host. */ static void ftdi_set_max_packet_size(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_interface *interface = port->serial->interface; struct usb_endpoint_descriptor *ep_desc; unsigned num_endpoints; unsigned i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; if (!num_endpoints) return; /* * NOTE: Some customers have programmed FT232R/FT245R devices * with an endpoint size of 0 - not good. In this case, we * want to override the endpoint descriptor setting and use a * value of 64 for wMaxPacketSize. */ for (i = 0; i < num_endpoints; i++) { ep_desc = &interface->cur_altsetting->endpoint[i].desc; if (!ep_desc->wMaxPacketSize) { ep_desc->wMaxPacketSize = cpu_to_le16(0x40); dev_warn(&port->dev, "Overriding wMaxPacketSize on endpoint %d\n", usb_endpoint_num(ep_desc)); } } /* Set max packet size based on last descriptor. */ priv->max_packet_size = usb_endpoint_maxp(ep_desc); } /* * *************************************************************************** * Sysfs Attribute * *************************************************************************** */ static ssize_t latency_timer_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); if (priv->flags & ASYNC_LOW_LATENCY) return sprintf(buf, "1\n"); else return sprintf(buf, "%u\n", priv->latency); } /* Write a new value of the latency timer, in units of milliseconds. */ static ssize_t latency_timer_store(struct device *dev, struct device_attribute *attr, const char *valbuf, size_t count) { struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); u8 v; int rv; if (kstrtou8(valbuf, 10, &v)) return -EINVAL; priv->latency = v; rv = write_latency_timer(port); if (rv < 0) return -EIO; return count; } static DEVICE_ATTR_RW(latency_timer); /* Write an event character directly to the FTDI register. The ASCII value is in the low 8 bits, with the enable bit in the 9th bit. 
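   For example (sysfs path shown is illustrative), writing 0x14a
   (0x100 | 'J') enables 'J' as the event character, and writing a value
   below 0x100 disables it again:

	echo 0x14a > /sys/bus/usb-serial/devices/ttyUSB0/event_char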
*/ static ssize_t event_char_store(struct device *dev, struct device_attribute *attr, const char *valbuf, size_t count) { struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; unsigned int v; int rv; if (kstrtouint(valbuf, 0, &v) || v >= 0x200) return -EINVAL; dev_dbg(&port->dev, "%s: setting event char = 0x%03x\n", __func__, v); rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), FTDI_SIO_SET_EVENT_CHAR_REQUEST, FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE, v, priv->channel, NULL, 0, WDR_TIMEOUT); if (rv < 0) { dev_dbg(&port->dev, "Unable to write event character: %i\n", rv); return -EIO; } return count; } static DEVICE_ATTR_WO(event_char); static struct attribute *ftdi_attrs[] = { &dev_attr_event_char.attr, &dev_attr_latency_timer.attr, NULL }; static umode_t ftdi_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); enum ftdi_chip_type type = priv->chip_type; if (attr == &dev_attr_event_char.attr) { if (type == SIO) return 0; } if (attr == &dev_attr_latency_timer.attr) { if (type == SIO || type == FT232A) return 0; } return attr->mode; } static const struct attribute_group ftdi_group = { .attrs = ftdi_attrs, .is_visible = ftdi_is_visible, }; static const struct attribute_group *ftdi_groups[] = { &ftdi_group, NULL }; #ifdef CONFIG_GPIOLIB static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int result; u16 val; result = usb_autopm_get_interface(serial->interface); if (result) return result; val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), FTDI_SIO_SET_BITMODE_REQUEST, FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val, priv->channel, NULL, 0, WDR_TIMEOUT); if (result < 0) { dev_err(&serial->interface->dev, "bitmode request failed for value 0x%04x: %d\n", val, result); } usb_autopm_put_interface(serial->interface); return result; } static int ftdi_set_cbus_pins(struct usb_serial_port *port) { return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_CBUS); } static int ftdi_exit_cbus_mode(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); priv->gpio_output = 0; priv->gpio_value = 0; return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_RESET); } static int ftdi_gpio_request(struct gpio_chip *gc, unsigned int offset) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); int result; mutex_lock(&priv->gpio_lock); if (!priv->gpio_used) { /* Set default pin states, as we cannot get them from device */ priv->gpio_output = 0x00; priv->gpio_value = 0x00; result = ftdi_set_cbus_pins(port); if (result) { mutex_unlock(&priv->gpio_lock); return result; } priv->gpio_used = true; } mutex_unlock(&priv->gpio_lock); return 0; } static int ftdi_read_cbus_pins(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; u8 buf; int result; result = usb_autopm_get_interface(serial->interface); if (result) return result; result = usb_control_msg_recv(serial->dev, 0, FTDI_SIO_READ_PINS_REQUEST, FTDI_SIO_READ_PINS_REQUEST_TYPE, 0, priv->channel, &buf, 1, WDR_TIMEOUT, GFP_KERNEL); if (result == 0) 
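		/* the byte read back holds the current CBUS pin states */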
result = buf; usb_autopm_put_interface(serial->interface); return result; } static int ftdi_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct usb_serial_port *port = gpiochip_get_data(gc); int result; result = ftdi_read_cbus_pins(port); if (result < 0) return result; return !!(result & BIT(gpio)); } static int ftdi_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); int result; mutex_lock(&priv->gpio_lock); if (value) priv->gpio_value |= BIT(gpio); else priv->gpio_value &= ~BIT(gpio); result = ftdi_set_cbus_pins(port); mutex_unlock(&priv->gpio_lock); return result; } static int ftdi_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct usb_serial_port *port = gpiochip_get_data(gc); int result; result = ftdi_read_cbus_pins(port); if (result < 0) return result; *bits = result & *mask; return 0; } static int ftdi_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); int result; mutex_lock(&priv->gpio_lock); priv->gpio_value &= ~(*mask); priv->gpio_value |= *bits & *mask; result = ftdi_set_cbus_pins(port); mutex_unlock(&priv->gpio_lock); return result; } static int ftdi_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); return !(priv->gpio_output & BIT(gpio)); } static int ftdi_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); int result; mutex_lock(&priv->gpio_lock); priv->gpio_output &= ~BIT(gpio); result = ftdi_set_cbus_pins(port); mutex_unlock(&priv->gpio_lock); return result; } static int ftdi_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, int value) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); int result; mutex_lock(&priv->gpio_lock); priv->gpio_output |= BIT(gpio); if (value) priv->gpio_value |= BIT(gpio); else priv->gpio_value &= ~BIT(gpio); result = ftdi_set_cbus_pins(port); mutex_unlock(&priv->gpio_lock); return result; } static int ftdi_gpio_init_valid_mask(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios) { struct usb_serial_port *port = gpiochip_get_data(gc); struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned long map = priv->gpio_altfunc; bitmap_complement(valid_mask, &map, ngpios); if (bitmap_empty(valid_mask, ngpios)) dev_dbg(&port->dev, "no CBUS pin configured for GPIO\n"); else dev_dbg(&port->dev, "CBUS%*pbl configured for GPIO\n", ngpios, valid_mask); return 0; } static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr, u16 nbytes) { int read = 0; if (addr % 2 != 0) return -EINVAL; if (nbytes % 2 != 0) return -EINVAL; /* Read EEPROM two bytes at a time */ while (read < nbytes) { int rv; rv = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), FTDI_SIO_READ_EEPROM_REQUEST, FTDI_SIO_READ_EEPROM_REQUEST_TYPE, 0, (addr + read) / 2, dst + read, 2, WDR_TIMEOUT); if (rv < 2) { if (rv >= 0) return -EIO; else return rv; } read += rv; } return 0; } static int ftdi_gpio_init_ft232h(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); u16 
cbus_config; u8 *buf; int ret; int i; buf = kmalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; ret = ftdi_read_eeprom(port->serial, buf, 0x1a, 4); if (ret < 0) goto out_free; /* * FT232H CBUS Memory Map * * 0x1a: X- (upper nibble -> AC5) * 0x1b: -X (lower nibble -> AC6) * 0x1c: XX (upper nibble -> AC9 | lower nibble -> AC8) */ cbus_config = buf[2] << 8 | (buf[1] & 0xf) << 4 | (buf[0] & 0xf0) >> 4; priv->gc.ngpio = 4; priv->gpio_altfunc = 0xff; for (i = 0; i < priv->gc.ngpio; ++i) { if ((cbus_config & 0xf) == FTDI_FTX_CBUS_MUX_GPIO) priv->gpio_altfunc &= ~BIT(i); cbus_config >>= 4; } out_free: kfree(buf); return ret; } static int ftdi_gpio_init_ft232r(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); u16 cbus_config; u8 *buf; int ret; int i; buf = kmalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; ret = ftdi_read_eeprom(port->serial, buf, 0x14, 2); if (ret < 0) goto out_free; cbus_config = le16_to_cpup((__le16 *)buf); dev_dbg(&port->dev, "cbus_config = 0x%04x\n", cbus_config); priv->gc.ngpio = 4; priv->gpio_altfunc = 0xff; for (i = 0; i < priv->gc.ngpio; ++i) { if ((cbus_config & 0xf) == FTDI_FT232R_CBUS_MUX_GPIO) priv->gpio_altfunc &= ~BIT(i); cbus_config >>= 4; } out_free: kfree(buf); return ret; } static int ftdi_gpio_init_ftx(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; const u16 cbus_cfg_addr = 0x1a; const u16 cbus_cfg_size = 4; u8 *cbus_cfg_buf; int result; u8 i; cbus_cfg_buf = kmalloc(cbus_cfg_size, GFP_KERNEL); if (!cbus_cfg_buf) return -ENOMEM; result = ftdi_read_eeprom(serial, cbus_cfg_buf, cbus_cfg_addr, cbus_cfg_size); if (result < 0) goto out_free; /* FIXME: FT234XD alone has 1 GPIO, but how to recognize this IC? */ priv->gc.ngpio = 4; /* Determine which pins are configured for CBUS bitbanging */ priv->gpio_altfunc = 0xff; for (i = 0; i < priv->gc.ngpio; ++i) { if (cbus_cfg_buf[i] == FTDI_FTX_CBUS_MUX_GPIO) priv->gpio_altfunc &= ~BIT(i); } out_free: kfree(cbus_cfg_buf); return result; } static int ftdi_gpio_init(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int result; switch (priv->chip_type) { case FT232H: result = ftdi_gpio_init_ft232h(port); break; case FT232R: result = ftdi_gpio_init_ft232r(port); break; case FTX: result = ftdi_gpio_init_ftx(port); break; default: return 0; } if (result < 0) return result; mutex_init(&priv->gpio_lock); priv->gc.label = "ftdi-cbus"; priv->gc.request = ftdi_gpio_request; priv->gc.get_direction = ftdi_gpio_direction_get; priv->gc.direction_input = ftdi_gpio_direction_input; priv->gc.direction_output = ftdi_gpio_direction_output; priv->gc.init_valid_mask = ftdi_gpio_init_valid_mask; priv->gc.get = ftdi_gpio_get; priv->gc.set = ftdi_gpio_set; priv->gc.get_multiple = ftdi_gpio_get_multiple; priv->gc.set_multiple = ftdi_gpio_set_multiple; priv->gc.owner = THIS_MODULE; priv->gc.parent = &serial->interface->dev; priv->gc.base = -1; priv->gc.can_sleep = true; result = gpiochip_add_data(&priv->gc, port); if (!result) priv->gpio_registered = true; return result; } static void ftdi_gpio_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); if (priv->gpio_registered) { gpiochip_remove(&priv->gc); priv->gpio_registered = false; } if (priv->gpio_used) { /* Exiting CBUS-mode does not reset pin states. 
*/ ftdi_exit_cbus_mode(port); priv->gpio_used = false; } } #else static int ftdi_gpio_init(struct usb_serial_port *port) { return 0; } static void ftdi_gpio_remove(struct usb_serial_port *port) { } #endif /* CONFIG_GPIOLIB */ /* * *************************************************************************** * FTDI driver specific functions * *************************************************************************** */ static int ftdi_probe(struct usb_serial *serial, const struct usb_device_id *id) { const struct ftdi_quirk *quirk = (struct ftdi_quirk *)id->driver_info; if (quirk && quirk->probe) { int ret = quirk->probe(serial); if (ret != 0) return ret; } usb_set_serial_data(serial, (void *)id->driver_info); return 0; } static int ftdi_port_probe(struct usb_serial_port *port) { const struct ftdi_quirk *quirk = usb_get_serial_data(port->serial); struct ftdi_private *priv; int result; priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_init(&priv->cfg_lock); if (quirk && quirk->port_probe) quirk->port_probe(priv); usb_set_serial_port_data(port, priv); result = ftdi_determine_type(port); if (result) goto err_free; ftdi_set_max_packet_size(port); if (quirk == &ftdi_ndi_quirk) priv->latency = 1; else if (read_latency_timer(port) < 0) priv->latency = 16; write_latency_timer(port); result = ftdi_gpio_init(port); if (result < 0) { dev_err(&port->serial->interface->dev, "GPIO initialisation failed: %d\n", result); } return 0; err_free: kfree(priv); return result; } /* * Setup for the USB-UIRT device, which requires hardwired baudrate * (38400 gets mapped to 312500). */ static void ftdi_usb_uirt_setup(struct ftdi_private *priv) { priv->flags |= ASYNC_SPD_CUST; priv->custom_divisor = 77; priv->force_baud = 38400; } /* * Setup for the HE-TIRA1 device, which requires hardwired baudrate * (38400 gets mapped to 100000) and RTS-CTS enabled. */ static void ftdi_he_tira1_setup(struct ftdi_private *priv) { priv->flags |= ASYNC_SPD_CUST; priv->custom_divisor = 240; priv->force_baud = 38400; priv->force_rtscts = 1; } /* * First port on JTAG adaptors such as Olimex arm-usb-ocd or the FIC/OpenMoko * Neo1973 Debug Board is reserved for JTAG interface and can be accessed from * userspace using openocd. */ static int ftdi_jtag_probe(struct usb_serial *serial) { struct usb_interface *intf = serial->interface; int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum == 0) return -ENODEV; return 0; } static int ftdi_8u2232c_probe(struct usb_serial *serial) { struct usb_interface *intf = serial->interface; struct usb_device *udev = serial->dev; int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum == 0) { if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) return -ENODEV; if (udev->product && (!strcmp(udev->product, "Arrow USB Blaster") || !strcmp(udev->product, "BeagleBone/XDS100V2") || !strcmp(udev->product, "SNAP Connect E10"))) return -ENODEV; } return 0; } /* * First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's * ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and * can be accessed from userspace. * The next two ports are enabled as UARTs by default, where port 2 is * a conventional RS-232 UART. 
*/ static int ftdi_stmclite_probe(struct usb_serial *serial) { struct usb_interface *intf = serial->interface; int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum < 2) return -ENODEV; return 0; } static void ftdi_port_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); ftdi_gpio_remove(port); kfree(priv); } static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_device *dev = port->serial->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); /* No error checking for this (will get errors later anyway) */ /* See ftdi_sio.h for description of what is reset */ usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE, FTDI_SIO_RESET_SIO, priv->channel, NULL, 0, WDR_TIMEOUT); /* Termios defaults are set by usb_serial_init. We don't change port->tty->termios - this would lose speed settings, etc. This is same behaviour as serial.c/rs_open() - Kuba */ /* ftdi_set_termios will send usb control messages */ if (tty) ftdi_set_termios(tty, port, NULL); return usb_serial_generic_open(tty, port); } static void ftdi_dtr_rts(struct usb_serial_port *port, int on) { struct ftdi_private *priv = usb_get_serial_port_data(port); /* Disable flow control */ if (!on) { if (usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->channel, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "error from flowcontrol urb\n"); } } /* drop RTS and DTR */ if (on) set_mctrl(port, TIOCM_DTR | TIOCM_RTS); else clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } /* The SIO requires the first byte to have: * B0 1 * B1 0 * B2..7 length of message excluding byte 0 * * The new devices do not require this byte */ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size) { struct ftdi_private *priv; int count; unsigned long flags; priv = usb_get_serial_port_data(port); if (priv->chip_type == SIO) { unsigned char *buffer = dest; int i, len, c; count = 0; spin_lock_irqsave(&port->lock, flags); for (i = 0; i < size - 1; i += priv->max_packet_size) { len = min_t(int, size - i, priv->max_packet_size) - 1; c = kfifo_out(&port->write_fifo, &buffer[i + 1], len); if (!c) break; port->icount.tx += c; buffer[i] = (c << 2) + 1; count += c + 1; } spin_unlock_irqrestore(&port->lock, flags); } else { count = kfifo_out_locked(&port->write_fifo, dest, size, &port->lock); port->icount.tx += count; } return count; } #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) static int ftdi_process_packet(struct usb_serial_port *port, struct ftdi_private *priv, unsigned char *buf, int len) { unsigned char status; bool brkint = false; int i; char flag; if (len < 2) { dev_dbg(&port->dev, "malformed packet\n"); return 0; } /* Compare new line status to the old one, signal if different/ N.B. packet may be processed more than once, but differences are only processed once. 
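Byte 0 of each packet carries the modem line states (CTS, DSR, RI, RLSD); byte 1 carries the line status and error flags (BI, PE, FE, OE) together with the transmitter-empty bit (TEMT).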
*/ status = buf[0] & FTDI_STATUS_B0_MASK; if (status != priv->prev_status) { char diff_status = status ^ priv->prev_status; if (diff_status & FTDI_RS0_CTS) port->icount.cts++; if (diff_status & FTDI_RS0_DSR) port->icount.dsr++; if (diff_status & FTDI_RS0_RI) port->icount.rng++; if (diff_status & FTDI_RS0_RLSD) { struct tty_struct *tty; port->icount.dcd++; tty = tty_port_tty_get(&port->port); if (tty) usb_serial_handle_dcd_change(port, tty, status & FTDI_RS0_RLSD); tty_kref_put(tty); } wake_up_interruptible(&port->port.delta_msr_wait); priv->prev_status = status; } /* save if the transmitter is empty or not */ if (buf[1] & FTDI_RS_TEMT) priv->transmit_empty = 1; else priv->transmit_empty = 0; if (len == 2) return 0; /* status only */ /* * Break and error status must only be processed for packets with * data payload to avoid over-reporting. */ flag = TTY_NORMAL; if (buf[1] & FTDI_RS_ERR_MASK) { /* * Break takes precedence over parity, which takes precedence * over framing errors. Note that break is only associated * with the last character in the buffer and only when it's a * NUL. */ if (buf[1] & FTDI_RS_BI && buf[len - 1] == '\0') { port->icount.brk++; brkint = true; } if (buf[1] & FTDI_RS_PE) { flag = TTY_PARITY; port->icount.parity++; } else if (buf[1] & FTDI_RS_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Overrun is special, not associated with a char */ if (buf[1] & FTDI_RS_OE) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } port->icount.rx += len - 2; if (brkint || port->sysrq) { for (i = 2; i < len; i++) { if (brkint && i == len - 1) { if (usb_serial_handle_break(port)) return len - 3; flag = TTY_BREAK; } if (usb_serial_handle_sysrq_char(port, buf[i])) continue; tty_insert_flip_char(&port->port, buf[i], flag); } } else { tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, len - 2); } return len - 2; } static void ftdi_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct ftdi_private *priv = usb_get_serial_port_data(port); char *data = urb->transfer_buffer; int i; int len; int count = 0; for (i = 0; i < urb->actual_length; i += priv->max_packet_size) { len = min_t(int, urb->actual_length - i, priv->max_packet_size); count += ftdi_process_packet(port, priv, &data[i], len); } if (count) tty_flip_buffer_push(&port->port); } static int ftdi_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); u16 value; int ret; /* break_state = -1 to turn on break, and 0 to turn off break */ /* see drivers/char/tty_io.c to see it used */ /* last_set_data_value NEVER has the break bit set in it */ if (break_state) value = priv->last_set_data_value | FTDI_SIO_SET_BREAK; else value = priv->last_set_data_value; ret = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_DATA_REQUEST, FTDI_SIO_SET_DATA_REQUEST_TYPE, value, priv->channel, NULL, 0, WDR_TIMEOUT); if (ret < 0) { dev_err(&port->dev, "%s FAILED to enable/disable break state (state was %d)\n", __func__, break_state); return ret; } dev_dbg(&port->dev, "%s break state is %d - urb is %d\n", __func__, break_state, value); return 0; } static bool ftdi_tx_empty(struct usb_serial_port *port) { unsigned char buf[2]; int ret; ret = ftdi_get_modem_status(port, buf); if (ret == 2) { if (!(buf[1] & FTDI_RS_TEMT)) return false; } return true; } /* old_termios contains the original termios settings and tty->termios contains * the new setting 
to be used * WARNING: set_termios calls this with old_termios in kernel space */ static void ftdi_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_device *dev = port->serial->dev; struct device *ddev = &port->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = &tty->termios; unsigned int cflag; u16 value, index; int ret; /* Force baud rate if this device requires it, unless it is set to B0. */ if (priv->force_baud && ((termios->c_cflag & CBAUD) != B0)) { dev_dbg(ddev, "%s: forcing baud rate for this device\n", __func__); tty_encode_baud_rate(tty, priv->force_baud, priv->force_baud); } /* Force RTS-CTS if this device requires it. */ if (priv->force_rtscts) { dev_dbg(ddev, "%s: forcing rtscts for this device\n", __func__); termios->c_cflag |= CRTSCTS; } /* * All FTDI UART chips are limited to CS7/8. We shouldn't pretend to * support CS5/6 and revert the CSIZE setting instead. * * CS5 however is used to control some smartcard readers which abuse * this limitation to switch modes. Original FTDI chips fall back to * eight data bits. * * TODO: Implement a quirk to only allow this with mentioned * readers. One I know of (Argolis Smartreader V1) * returns "USB smartcard server" as iInterface string. * The vendor didn't bother with a custom VID/PID of * course. */ if (C_CSIZE(tty) == CS6) { dev_warn(ddev, "requested CSIZE setting not supported\n"); termios->c_cflag &= ~CSIZE; if (old_termios) termios->c_cflag |= old_termios->c_cflag & CSIZE; else termios->c_cflag |= CS8; } cflag = termios->c_cflag; if (!old_termios) goto no_skip; if (old_termios->c_cflag == termios->c_cflag && old_termios->c_ispeed == termios->c_ispeed && old_termios->c_ospeed == termios->c_ospeed) goto no_c_cflag_changes; /* NOTE These routines can get interrupted by ftdi_sio_read_bulk_callback - need to examine what this means - don't see any problems yet */ if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) == (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB))) goto no_data_parity_stop_changes; no_skip: /* Set number of data bits, parity, stop bits */ value = 0; value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 : FTDI_SIO_SET_DATA_STOP_BITS_1); if (cflag & PARENB) { if (cflag & CMSPAR) value |= cflag & PARODD ? FTDI_SIO_SET_DATA_PARITY_MARK : FTDI_SIO_SET_DATA_PARITY_SPACE; else value |= cflag & PARODD ? 
FTDI_SIO_SET_DATA_PARITY_ODD : FTDI_SIO_SET_DATA_PARITY_EVEN; } else { value |= FTDI_SIO_SET_DATA_PARITY_NONE; } switch (cflag & CSIZE) { case CS5: dev_dbg(ddev, "Setting CS5 quirk\n"); break; case CS7: value |= 7; dev_dbg(ddev, "Setting CS7\n"); break; default: case CS8: value |= 8; dev_dbg(ddev, "Setting CS8\n"); break; } /* This is needed by the break command since it uses the same command - but is or'ed with this value */ priv->last_set_data_value = value; if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_DATA_REQUEST, FTDI_SIO_SET_DATA_REQUEST_TYPE, value, priv->channel, NULL, 0, WDR_SHORT_TIMEOUT) < 0) { dev_err(ddev, "%s FAILED to set databits/stopbits/parity\n", __func__); } /* Now do the baudrate */ no_data_parity_stop_changes: if ((cflag & CBAUD) == B0) { /* Disable flow control */ if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->channel, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(ddev, "%s error from disable flowcontrol urb\n", __func__); } /* Drop RTS and DTR */ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } else { /* set the baudrate determined before */ mutex_lock(&priv->cfg_lock); if (change_speed(tty, port)) dev_err(ddev, "%s urb failed to set baudrate\n", __func__); mutex_unlock(&priv->cfg_lock); /* Ensure RTS and DTR are raised when baudrate changed from 0 */ if (old_termios && (old_termios->c_cflag & CBAUD) == B0) set_mctrl(port, TIOCM_DTR | TIOCM_RTS); } no_c_cflag_changes: /* Set hardware-assisted flow control */ value = 0; if (C_CRTSCTS(tty)) { dev_dbg(&port->dev, "enabling rts/cts flow control\n"); index = FTDI_SIO_RTS_CTS_HS; } else if (I_IXON(tty)) { dev_dbg(&port->dev, "enabling xon/xoff flow control\n"); index = FTDI_SIO_XON_XOFF_HS; value = STOP_CHAR(tty) << 8 | START_CHAR(tty); } else { dev_dbg(&port->dev, "disabling flow control\n"); index = FTDI_SIO_DISABLE_FLOW_CTRL; } index |= priv->channel; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, value, index, NULL, 0, WDR_TIMEOUT); if (ret < 0) dev_err(&port->dev, "failed to set flow control: %d\n", ret); } /* * Get modem-control status. * * Returns the number of status bytes retrieved (device dependant), or * negative error code. */ static int ftdi_get_modem_status(struct usb_serial_port *port, unsigned char status[2]) { struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned char *buf; int len; int ret; buf = kmalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; /* * The device returns a two byte value (the SIO a 1 byte value) in the * same format as the data returned from the IN endpoint. */ if (priv->chip_type == SIO) len = 1; else len = 2; ret = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), FTDI_SIO_GET_MODEM_STATUS_REQUEST, FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, 0, priv->channel, buf, len, WDR_TIMEOUT); /* NOTE: We allow short responses and handle that below. 
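A one-byte response (as returned by the SIO) is accepted; status[1] is zeroed in that case.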
*/ if (ret < 1) { dev_err(&port->dev, "failed to get modem status: %d\n", ret); if (ret >= 0) ret = -EIO; ret = usb_translate_errors(ret); goto out; } status[0] = buf[0]; if (ret > 1) status[1] = buf[1]; else status[1] = 0; dev_dbg(&port->dev, "%s - 0x%02x%02x\n", __func__, status[0], status[1]); out: kfree(buf); return ret; } static int ftdi_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned char buf[2]; int ret; ret = ftdi_get_modem_status(port, buf); if (ret < 0) return ret; ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) | (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) | (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) | (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) | priv->last_dtr_rts; return ret; } static int ftdi_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return update_mctrl(port, set, clear); } static int ftdi_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; void __user *argp = (void __user *)arg; switch (cmd) { case TIOCSERGETLSR: return get_lsr_info(port, argp); default: break; } return -ENOIOCTLCMD; } static struct usb_serial_driver ftdi_device = { .driver = { .name = "ftdi_sio", .dev_groups = ftdi_groups, }, .description = "FTDI USB Serial Device", .id_table = id_table_combined, .num_ports = 1, .bulk_in_size = 512, .bulk_out_size = 256, .probe = ftdi_probe, .port_probe = ftdi_port_probe, .port_remove = ftdi_port_remove, .open = ftdi_open, .dtr_rts = ftdi_dtr_rts, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .process_read_urb = ftdi_process_read_urb, .prepare_write_buffer = ftdi_prepare_write_buffer, .tiocmget = ftdi_tiocmget, .tiocmset = ftdi_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .ioctl = ftdi_ioctl, .get_serial = get_serial_info, .set_serial = set_serial_info, .set_termios = ftdi_set_termios, .break_ctl = ftdi_break_ctl, .tx_empty = ftdi_tx_empty, }; static struct usb_serial_driver * const serial_drivers[] = { &ftdi_device, NULL }; module_usb_serial_driver(serial_drivers, id_table_combined); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
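/*
 * Example (editorial sketch, not part of the driver): exercising the
 * latency_timer and event_char sysfs attributes defined above from
 * userspace. The sysfs path below is an assumption for a port bound to
 * ttyUSB0; adjust it for the actual device.
 */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/usb-serial/devices/ttyUSB0"; /* assumed path */
	char path[256];
	FILE *f;

	/* Request a 1 ms latency timer for lower receive latency. */
	snprintf(path, sizeof(path), "%s/latency_timer", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "1\n");
	fclose(f);

	/* Enable the event character and set it to newline (bit 8 = enable). */
	snprintf(path, sizeof(path), "%s/event_char", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "0x10a\n");
	fclose(f);

	return 0;
}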
// SPDX-License-Identifier: GPL-2.0-or-later /* * MIDI byte <-> sequencer event coder * * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de>, * Jaroslav Kysela <perex@perex.cz> */ #include <linux/slab.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/module.h> #include <sound/core.h> #include <sound/seq_kernel.h> #include <sound/seq_midi_event.h> #include <sound/asoundef.h> MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("MIDI byte <-> sequencer event coder"); MODULE_LICENSE("GPL"); /* event type, index into status_event[] */ /* from 0 to 6 are normal commands (note off, on, etc.) for 0x9?-0xe?
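(the index is taken from the high nibble of the status byte, (c >> 4) & 0x07, so 0x8n maps to index 0 for note off, 0x9n to 1 for note on, and so on up to 0xEn at index 6 for pitch bend)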
*/ #define ST_INVALID 7 #define ST_SPECIAL 8 #define ST_SYSEX ST_SPECIAL /* from 8 to 15 are events for 0xf0-0xf7 */ /* * prototypes */ static void note_event(struct snd_midi_event *dev, struct snd_seq_event *ev); static void one_param_ctrl_event(struct snd_midi_event *dev, struct snd_seq_event *ev); static void pitchbend_ctrl_event(struct snd_midi_event *dev, struct snd_seq_event *ev); static void two_param_ctrl_event(struct snd_midi_event *dev, struct snd_seq_event *ev); static void one_param_event(struct snd_midi_event *dev, struct snd_seq_event *ev); static void songpos_event(struct snd_midi_event *dev, struct snd_seq_event *ev); static void note_decode(struct snd_seq_event *ev, unsigned char *buf); static void one_param_decode(struct snd_seq_event *ev, unsigned char *buf); static void pitchbend_decode(struct snd_seq_event *ev, unsigned char *buf); static void two_param_decode(struct snd_seq_event *ev, unsigned char *buf); static void songpos_decode(struct snd_seq_event *ev, unsigned char *buf); /* * event list */ static struct status_event_list { int event; int qlen; void (*encode)(struct snd_midi_event *dev, struct snd_seq_event *ev); void (*decode)(struct snd_seq_event *ev, unsigned char *buf); } status_event[] = { /* 0x80 - 0xef */ {SNDRV_SEQ_EVENT_NOTEOFF, 2, note_event, note_decode}, {SNDRV_SEQ_EVENT_NOTEON, 2, note_event, note_decode}, {SNDRV_SEQ_EVENT_KEYPRESS, 2, note_event, note_decode}, {SNDRV_SEQ_EVENT_CONTROLLER, 2, two_param_ctrl_event, two_param_decode}, {SNDRV_SEQ_EVENT_PGMCHANGE, 1, one_param_ctrl_event, one_param_decode}, {SNDRV_SEQ_EVENT_CHANPRESS, 1, one_param_ctrl_event, one_param_decode}, {SNDRV_SEQ_EVENT_PITCHBEND, 2, pitchbend_ctrl_event, pitchbend_decode}, /* invalid */ {SNDRV_SEQ_EVENT_NONE, -1, NULL, NULL}, /* 0xf0 - 0xff */ {SNDRV_SEQ_EVENT_SYSEX, 1, NULL, NULL}, /* sysex: 0xf0 */ {SNDRV_SEQ_EVENT_QFRAME, 1, one_param_event, one_param_decode}, /* 0xf1 */ {SNDRV_SEQ_EVENT_SONGPOS, 2, songpos_event, songpos_decode}, /* 0xf2 */ {SNDRV_SEQ_EVENT_SONGSEL, 1, one_param_event, one_param_decode}, /* 0xf3 */ {SNDRV_SEQ_EVENT_NONE, -1, NULL, NULL}, /* 0xf4 */ {SNDRV_SEQ_EVENT_NONE, -1, NULL, NULL}, /* 0xf5 */ {SNDRV_SEQ_EVENT_TUNE_REQUEST, 0, NULL, NULL}, /* 0xf6 */ {SNDRV_SEQ_EVENT_NONE, -1, NULL, NULL}, /* 0xf7 */ {SNDRV_SEQ_EVENT_CLOCK, 0, NULL, NULL}, /* 0xf8 */ {SNDRV_SEQ_EVENT_NONE, -1, NULL, NULL}, /* 0xf9 */ {SNDRV_SEQ_EVENT_START, 0, NULL, NULL}, /* 0xfa */ {SNDRV_SEQ_EVENT_CONTINUE, 0, NULL, NULL}, /* 0xfb */ {SNDRV_SEQ_EVENT_STOP, 0, NULL, NULL}, /* 0xfc */ {SNDRV_SEQ_EVENT_NONE, -1, NULL, NULL}, /* 0xfd */ {SNDRV_SEQ_EVENT_SENSING, 0, NULL, NULL}, /* 0xfe */ {SNDRV_SEQ_EVENT_RESET, 0, NULL, NULL}, /* 0xff */ }; static int extra_decode_ctrl14(struct snd_midi_event *dev, unsigned char *buf, int len, struct snd_seq_event *ev); static int extra_decode_xrpn(struct snd_midi_event *dev, unsigned char *buf, int count, struct snd_seq_event *ev); static struct extra_event_list { int event; int (*decode)(struct snd_midi_event *dev, unsigned char *buf, int len, struct snd_seq_event *ev); } extra_event[] = { {SNDRV_SEQ_EVENT_CONTROL14, extra_decode_ctrl14}, {SNDRV_SEQ_EVENT_NONREGPARAM, extra_decode_xrpn}, {SNDRV_SEQ_EVENT_REGPARAM, extra_decode_xrpn}, }; /* * new/delete record */ int snd_midi_event_new(int bufsize, struct snd_midi_event **rdev) { struct snd_midi_event *dev; *rdev = NULL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; if (bufsize > 0) { dev->buf = kmalloc(bufsize, GFP_KERNEL); if (dev->buf == NULL) { kfree(dev); return 
-ENOMEM; } } dev->bufsize = bufsize; dev->lastcmd = 0xff; dev->type = ST_INVALID; spin_lock_init(&dev->lock); *rdev = dev; return 0; } EXPORT_SYMBOL(snd_midi_event_new); void snd_midi_event_free(struct snd_midi_event *dev) { if (dev != NULL) { kfree(dev->buf); kfree(dev); } } EXPORT_SYMBOL(snd_midi_event_free); /* * initialize record */ static inline void reset_encode(struct snd_midi_event *dev) { dev->read = 0; dev->qlen = 0; dev->type = ST_INVALID; } void snd_midi_event_reset_encode(struct snd_midi_event *dev) { guard(spinlock_irqsave)(&dev->lock); reset_encode(dev); } EXPORT_SYMBOL(snd_midi_event_reset_encode); void snd_midi_event_reset_decode(struct snd_midi_event *dev) { guard(spinlock_irqsave)(&dev->lock); dev->lastcmd = 0xff; } EXPORT_SYMBOL(snd_midi_event_reset_decode); void snd_midi_event_no_status(struct snd_midi_event *dev, int on) { dev->nostat = on ? 1 : 0; } EXPORT_SYMBOL(snd_midi_event_no_status); /* * read one byte and encode to sequencer event: * return true if MIDI bytes are encoded to an event * false data is not finished */ bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c, struct snd_seq_event *ev) { bool rc = false; if (c >= MIDI_CMD_COMMON_CLOCK) { /* real-time event */ ev->type = status_event[ST_SPECIAL + c - 0xf0].event; ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK; ev->flags |= SNDRV_SEQ_EVENT_LENGTH_FIXED; return ev->type != SNDRV_SEQ_EVENT_NONE; } guard(spinlock_irqsave)(&dev->lock); if ((c & 0x80) && (c != MIDI_CMD_COMMON_SYSEX_END || dev->type != ST_SYSEX)) { /* new command */ dev->buf[0] = c; if ((c & 0xf0) == 0xf0) /* system messages */ dev->type = (c & 0x0f) + ST_SPECIAL; else dev->type = (c >> 4) & 0x07; dev->read = 1; dev->qlen = status_event[dev->type].qlen; } else { if (dev->qlen > 0) { /* rest of command */ dev->buf[dev->read++] = c; if (dev->type != ST_SYSEX) dev->qlen--; } else { /* running status */ dev->buf[1] = c; dev->qlen = status_event[dev->type].qlen - 1; dev->read = 2; } } if (dev->qlen == 0) { ev->type = status_event[dev->type].event; ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK; ev->flags |= SNDRV_SEQ_EVENT_LENGTH_FIXED; if (status_event[dev->type].encode) /* set data values */ status_event[dev->type].encode(dev, ev); if (dev->type >= ST_SPECIAL) dev->type = ST_INVALID; rc = true; } else if (dev->type == ST_SYSEX) { if (c == MIDI_CMD_COMMON_SYSEX_END || dev->read >= dev->bufsize) { ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK; ev->flags |= SNDRV_SEQ_EVENT_LENGTH_VARIABLE; ev->type = SNDRV_SEQ_EVENT_SYSEX; ev->data.ext.len = dev->read; ev->data.ext.ptr = dev->buf; if (c != MIDI_CMD_COMMON_SYSEX_END) dev->read = 0; /* continue to parse */ else reset_encode(dev); /* all parsed */ rc = true; } } return rc; } EXPORT_SYMBOL(snd_midi_event_encode_byte); /* encode note event */ static void note_event(struct snd_midi_event *dev, struct snd_seq_event *ev) { ev->data.note.channel = dev->buf[0] & 0x0f; ev->data.note.note = dev->buf[1]; ev->data.note.velocity = dev->buf[2]; } /* encode one parameter controls */ static void one_param_ctrl_event(struct snd_midi_event *dev, struct snd_seq_event *ev) { ev->data.control.channel = dev->buf[0] & 0x0f; ev->data.control.value = dev->buf[1]; } /* encode pitch wheel change */ static void pitchbend_ctrl_event(struct snd_midi_event *dev, struct snd_seq_event *ev) { ev->data.control.channel = dev->buf[0] & 0x0f; ev->data.control.value = (int)dev->buf[2] * 128 + (int)dev->buf[1] - 8192; } /* encode midi control change */ static void two_param_ctrl_event(struct snd_midi_event *dev, struct snd_seq_event 
*ev) { ev->data.control.channel = dev->buf[0] & 0x0f; ev->data.control.param = dev->buf[1]; ev->data.control.value = dev->buf[2]; } /* encode one parameter value*/ static void one_param_event(struct snd_midi_event *dev, struct snd_seq_event *ev) { ev->data.control.value = dev->buf[1]; } /* encode song position */ static void songpos_event(struct snd_midi_event *dev, struct snd_seq_event *ev) { ev->data.control.value = (int)dev->buf[2] * 128 + (int)dev->buf[1]; } /* * decode from a sequencer event to midi bytes * return the size of decoded midi events */ long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long count, struct snd_seq_event *ev) { unsigned int cmd, type; if (ev->type == SNDRV_SEQ_EVENT_NONE) return -ENOENT; for (type = 0; type < ARRAY_SIZE(status_event); type++) { if (ev->type == status_event[type].event) goto __found; } for (type = 0; type < ARRAY_SIZE(extra_event); type++) { if (ev->type == extra_event[type].event) return extra_event[type].decode(dev, buf, count, ev); } return -ENOENT; __found: if (type >= ST_SPECIAL) cmd = 0xf0 + (type - ST_SPECIAL); else /* data.note.channel and data.control.channel is identical */ cmd = 0x80 | (type << 4) | (ev->data.note.channel & 0x0f); if (cmd == MIDI_CMD_COMMON_SYSEX) { snd_midi_event_reset_decode(dev); return snd_seq_expand_var_event(ev, count, buf, 1, 0); } else { int qlen; unsigned char xbuf[4]; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if ((cmd & 0xf0) == 0xf0 || dev->lastcmd != cmd || dev->nostat) { dev->lastcmd = cmd; spin_unlock_irqrestore(&dev->lock, flags); xbuf[0] = cmd; if (status_event[type].decode) status_event[type].decode(ev, xbuf + 1); qlen = status_event[type].qlen + 1; } else { spin_unlock_irqrestore(&dev->lock, flags); if (status_event[type].decode) status_event[type].decode(ev, xbuf + 0); qlen = status_event[type].qlen; } if (count < qlen) return -ENOMEM; memcpy(buf, xbuf, qlen); return qlen; } } EXPORT_SYMBOL(snd_midi_event_decode); /* decode note event */ static void note_decode(struct snd_seq_event *ev, unsigned char *buf) { buf[0] = ev->data.note.note & 0x7f; buf[1] = ev->data.note.velocity & 0x7f; } /* decode one parameter controls */ static void one_param_decode(struct snd_seq_event *ev, unsigned char *buf) { buf[0] = ev->data.control.value & 0x7f; } /* decode pitch wheel change */ static void pitchbend_decode(struct snd_seq_event *ev, unsigned char *buf) { int value = ev->data.control.value + 8192; buf[0] = value & 0x7f; buf[1] = (value >> 7) & 0x7f; } /* decode midi control change */ static void two_param_decode(struct snd_seq_event *ev, unsigned char *buf) { buf[0] = ev->data.control.param & 0x7f; buf[1] = ev->data.control.value & 0x7f; } /* decode song position */ static void songpos_decode(struct snd_seq_event *ev, unsigned char *buf) { buf[0] = ev->data.control.value & 0x7f; buf[1] = (ev->data.control.value >> 7) & 0x7f; } /* decode 14bit control */ static int extra_decode_ctrl14(struct snd_midi_event *dev, unsigned char *buf, int count, struct snd_seq_event *ev) { unsigned char cmd; int idx = 0; cmd = MIDI_CMD_CONTROL|(ev->data.control.channel & 0x0f); if (ev->data.control.param < 0x20) { if (count < 4) return -ENOMEM; if (dev->nostat && count < 6) return -ENOMEM; if (cmd != dev->lastcmd || dev->nostat) { if (count < 5) return -ENOMEM; buf[idx++] = dev->lastcmd = cmd; } buf[idx++] = ev->data.control.param; buf[idx++] = (ev->data.control.value >> 7) & 0x7f; if (dev->nostat) buf[idx++] = cmd; buf[idx++] = ev->data.control.param + 0x20; buf[idx++] = 
ev->data.control.value & 0x7f; } else { if (count < 2) return -ENOMEM; if (cmd != dev->lastcmd || dev->nostat) { if (count < 3) return -ENOMEM; buf[idx++] = dev->lastcmd = cmd; } buf[idx++] = ev->data.control.param & 0x7f; buf[idx++] = ev->data.control.value & 0x7f; } return idx; } /* decode reg/nonreg param */ static int extra_decode_xrpn(struct snd_midi_event *dev, unsigned char *buf, int count, struct snd_seq_event *ev) { unsigned char cmd; const char *cbytes; static const char cbytes_nrpn[4] = { MIDI_CTL_NONREG_PARM_NUM_MSB, MIDI_CTL_NONREG_PARM_NUM_LSB, MIDI_CTL_MSB_DATA_ENTRY, MIDI_CTL_LSB_DATA_ENTRY }; static const char cbytes_rpn[4] = { MIDI_CTL_REGIST_PARM_NUM_MSB, MIDI_CTL_REGIST_PARM_NUM_LSB, MIDI_CTL_MSB_DATA_ENTRY, MIDI_CTL_LSB_DATA_ENTRY }; unsigned char bytes[4]; int idx = 0, i; if (count < 8) return -ENOMEM; if (dev->nostat && count < 12) return -ENOMEM; cmd = MIDI_CMD_CONTROL|(ev->data.control.channel & 0x0f); bytes[0] = (ev->data.control.param & 0x3f80) >> 7; bytes[1] = ev->data.control.param & 0x007f; bytes[2] = (ev->data.control.value & 0x3f80) >> 7; bytes[3] = ev->data.control.value & 0x007f; if (cmd != dev->lastcmd && !dev->nostat) { if (count < 9) return -ENOMEM; buf[idx++] = dev->lastcmd = cmd; } cbytes = ev->type == SNDRV_SEQ_EVENT_NONREGPARAM ? cbytes_nrpn : cbytes_rpn; for (i = 0; i < 4; i++) { if (dev->nostat) buf[idx++] = dev->lastcmd = cmd; buf[idx++] = cbytes[i]; buf[idx++] = bytes[i]; } return idx; } |
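/*
 * Example (illustrative sketch, not from the source): how a kernel-side
 * user of this exported API might feed raw MIDI bytes to the encoder.
 * The function name and the byte sequence are hypothetical.
 */
#include <sound/seq_midi_event.h>

static int example_encode_note_on(void)
{
	/* Hypothetical input: Note On, channel 0, note 60, velocity 127. */
	static const unsigned char bytes[] = { 0x90, 0x3c, 0x7f };
	struct snd_midi_event *dev;
	struct snd_seq_event ev = {};
	unsigned int i;
	int err;

	err = snd_midi_event_new(256, &dev);	/* 256-byte sysex buffer */
	if (err < 0)
		return err;

	for (i = 0; i < sizeof(bytes); i++) {
		if (snd_midi_event_encode_byte(dev, bytes[i], &ev)) {
			/*
			 * ev is now a complete SNDRV_SEQ_EVENT_NOTEON for
			 * channel 0, note 60, velocity 127, ready to be
			 * delivered to the sequencer.
			 */
			break;
		}
	}

	snd_midi_event_free(dev);
	return 0;
}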
// SPDX-License-Identifier: GPL-2.0 /* * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler, * for the blk-mq scheduling framework * * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk> */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/rbtree.h> #include <linux/sbitmap.h> #include <trace/events/block.h> #include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-mq-sched.h" /* * See Documentation/block/deadline-iosched.rst */ static const int read_expire = HZ / 2; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ /* * Time after which to dispatch lower priority requests even if higher * priority requests are pending. */ static const int prio_aging_expire = 10 * HZ; static const int writes_starved = 2; /* max times reads can starve a write */ static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. For throughput. */ enum dd_data_dir { DD_READ = READ, DD_WRITE = WRITE, }; enum { DD_DIR_COUNT = 2 }; enum dd_prio { DD_RT_PRIO = 0, DD_BE_PRIO = 1, DD_IDLE_PRIO = 2, DD_PRIO_MAX = 2, }; enum { DD_PRIO_COUNT = 3 }; /* * I/O statistics per I/O priority. It is fine if these counters overflow. * What matters is that these counters are at least as wide as * log2(max_outstanding_requests). */ struct io_stats_per_prio { uint32_t inserted; uint32_t merged; uint32_t dispatched; atomic_t completed; }; /* * Deadline scheduler data per I/O priority (enum dd_prio). Requests are * present on both sort_list[] and fifo_list[]. */ struct dd_per_prio { struct rb_root sort_list[DD_DIR_COUNT]; struct list_head fifo_list[DD_DIR_COUNT]; /* Position of the most recently dispatched request. */ sector_t latest_pos[DD_DIR_COUNT]; struct io_stats_per_prio stats; }; struct deadline_data { /* * run time data */ struct list_head dispatch; struct dd_per_prio per_prio[DD_PRIO_COUNT]; /* Data direction of latest dispatched request. */ enum dd_data_dir last_dir; unsigned int batching; /* number of sequential requests made */ unsigned int starved; /* times reads have starved writes */ /* * settings that change how the i/o scheduler behaves */ int fifo_expire[DD_DIR_COUNT]; int fifo_batch; int writes_starved; int front_merges; u32 async_depth; int prio_aging_expire; spinlock_t lock; }; /* Maps an I/O priority class to a deadline scheduler priority.
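IOPRIO_CLASS_RT maps to the highest level, IOPRIO_CLASS_BE and IOPRIO_CLASS_NONE share the best-effort level, and IOPRIO_CLASS_IDLE maps to the lowest level.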
*/ static const enum dd_prio ioprio_class_to_prio[] = { [IOPRIO_CLASS_NONE] = DD_BE_PRIO, [IOPRIO_CLASS_RT] = DD_RT_PRIO, [IOPRIO_CLASS_BE] = DD_BE_PRIO, [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO, }; static inline struct rb_root * deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq) { return &per_prio->sort_list[rq_data_dir(rq)]; } /* * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a * request. */ static u8 dd_rq_ioclass(struct request *rq) { return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); } /* * Return the first request for which blk_rq_pos() >= @pos. */ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio, enum dd_data_dir data_dir, sector_t pos) { struct rb_node *node = per_prio->sort_list[data_dir].rb_node; struct request *rq, *res = NULL; while (node) { rq = rb_entry_rq(node); if (blk_rq_pos(rq) >= pos) { res = rq; node = node->rb_left; } else { node = node->rb_right; } } return res; } static void deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq) { struct rb_root *root = deadline_rb_root(per_prio, rq); elv_rb_add(root, rq); } static inline void deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq) { elv_rb_del(deadline_rb_root(per_prio, rq), rq); } /* * remove rq from rbtree and fifo. */ static void deadline_remove_request(struct request_queue *q, struct dd_per_prio *per_prio, struct request *rq) { list_del_init(&rq->queuelist); /* * We might not be on the rbtree, if we are doing an insert merge */ if (!RB_EMPTY_NODE(&rq->rb_node)) deadline_del_rq_rb(per_prio, rq); elv_rqhash_del(q, rq); if (q->last_merge == rq) q->last_merge = NULL; } static void dd_request_merged(struct request_queue *q, struct request *req, enum elv_merge type) { struct deadline_data *dd = q->elevator->elevator_data; const u8 ioprio_class = dd_rq_ioclass(req); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; struct dd_per_prio *per_prio = &dd->per_prio[prio]; /* * if the merge was a front merge, we need to reposition request */ if (type == ELEVATOR_FRONT_MERGE) { elv_rb_del(deadline_rb_root(per_prio, req), req); deadline_add_rq_rb(per_prio, req); } } /* * Callback function that is invoked after @next has been merged into @req. */ static void dd_merged_requests(struct request_queue *q, struct request *req, struct request *next) { struct deadline_data *dd = q->elevator->elevator_data; const u8 ioprio_class = dd_rq_ioclass(next); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; lockdep_assert_held(&dd->lock); dd->per_prio[prio].stats.merged++; /* * if next expires before rq, assign its expire time to rq * and move into next position (next will be deleted) in fifo */ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { if (time_before((unsigned long)next->fifo_time, (unsigned long)req->fifo_time)) { list_move(&req->queuelist, &next->queuelist); req->fifo_time = next->fifo_time; } } /* * kill knowledge of next, this one is a goner */ deadline_remove_request(q, &dd->per_prio[prio], next); } /* * move an entry to dispatch queue */ static void deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, struct request *rq) { /* * take it off the sort and fifo list */ deadline_remove_request(rq->q, per_prio, rq); } /* Number of requests queued for a given priority level. 
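Computed as stats.inserted minus stats.completed; the 32-bit counters may wrap, but the difference stays correct as long as the number of outstanding requests fits in 32 bits.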
*/ static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) { const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; lockdep_assert_held(&dd->lock); return stats->inserted - atomic_read(&stats->completed); } /* * deadline_check_fifo returns true if and only if there are expired requests * in the FIFO list. Requires !list_empty(&dd->fifo_list[data_dir]). */ static inline bool deadline_check_fifo(struct dd_per_prio *per_prio, enum dd_data_dir data_dir) { struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next); return time_is_before_eq_jiffies((unsigned long)rq->fifo_time); } /* * For the specified data direction, return the next request to * dispatch using arrival ordered lists. */ static struct request * deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, enum dd_data_dir data_dir) { if (list_empty(&per_prio->fifo_list[data_dir])) return NULL; return rq_entry_fifo(per_prio->fifo_list[data_dir].next); } /* * For the specified data direction, return the next request to * dispatch using sector position sorted lists. */ static struct request * deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, enum dd_data_dir data_dir) { return deadline_from_pos(per_prio, data_dir, per_prio->latest_pos[data_dir]); } /* * Returns true if and only if @rq started after @latest_start where * @latest_start is in jiffies. */ static bool started_after(struct deadline_data *dd, struct request *rq, unsigned long latest_start) { unsigned long start_time = (unsigned long)rq->fifo_time; start_time -= dd->fifo_expire[rq_data_dir(rq)]; return time_after(start_time, latest_start); } static struct request *dd_start_request(struct deadline_data *dd, enum dd_data_dir data_dir, struct request *rq) { u8 ioprio_class = dd_rq_ioclass(rq); enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq); dd->per_prio[prio].stats.dispatched++; rq->rq_flags |= RQF_STARTED; return rq; } /* * deadline_dispatch_requests selects the best request according to * read/write expire, fifo_batch, etc and with a start time <= @latest_start. */ static struct request *__dd_dispatch_request(struct deadline_data *dd, struct dd_per_prio *per_prio, unsigned long latest_start) { struct request *rq, *next_rq; enum dd_data_dir data_dir; lockdep_assert_held(&dd->lock); /* * batches are currently reads XOR writes */ rq = deadline_next_request(dd, per_prio, dd->last_dir); if (rq && dd->batching < dd->fifo_batch) { /* we have a next request and are still entitled to batch */ data_dir = rq_data_dir(rq); goto dispatch_request; } /* * at this point we are not running a batch. 
select the appropriate * data direction (read / write) */ if (!list_empty(&per_prio->fifo_list[DD_READ])) { BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ])); if (deadline_fifo_request(dd, per_prio, DD_WRITE) && (dd->starved++ >= dd->writes_starved)) goto dispatch_writes; data_dir = DD_READ; goto dispatch_find_request; } /* * there are either no reads or writes have been starved */ if (!list_empty(&per_prio->fifo_list[DD_WRITE])) { dispatch_writes: BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE])); dd->starved = 0; data_dir = DD_WRITE; goto dispatch_find_request; } return NULL; dispatch_find_request: /* * we are not running a batch, find best request for selected data_dir */ next_rq = deadline_next_request(dd, per_prio, data_dir); if (deadline_check_fifo(per_prio, data_dir) || !next_rq) { /* * A deadline has expired, the last request was in the other * direction, or we have run out of higher-sectored requests. * Start again from the request with the earliest expiry time. */ rq = deadline_fifo_request(dd, per_prio, data_dir); } else { /* * The last req was the same dir and we have a next request in * sort order. No expired requests so continue on from here. */ rq = next_rq; } if (!rq) return NULL; dd->last_dir = data_dir; dd->batching = 0; dispatch_request: if (started_after(dd, rq, latest_start)) return NULL; /* * rq is the selected appropriate request. */ dd->batching++; deadline_move_request(dd, per_prio, rq); return dd_start_request(dd, data_dir, rq); } /* * Check whether there are any requests with priority other than DD_RT_PRIO * that were inserted more than prio_aging_expire jiffies ago. */ static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd, unsigned long now) { struct request *rq; enum dd_prio prio; int prio_cnt; lockdep_assert_held(&dd->lock); prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) + !!dd_queued(dd, DD_IDLE_PRIO); if (prio_cnt < 2) return NULL; for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) { rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now - dd->prio_aging_expire); if (rq) return rq; } return NULL; } /* * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests(). * * One confusing aspect here is that we get called for a specific * hardware queue, but we may return a request that is for a * different hardware queue. This is because mq-deadline has shared * state for all hardware queues, in terms of sorting, FIFOs, etc. */ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; const unsigned long now = jiffies; struct request *rq; enum dd_prio prio; spin_lock(&dd->lock); if (!list_empty(&dd->dispatch)) { rq = list_first_entry(&dd->dispatch, struct request, queuelist); list_del_init(&rq->queuelist); dd_start_request(dd, rq_data_dir(rq), rq); goto unlock; } rq = dd_dispatch_prio_aged_requests(dd, now); if (rq) goto unlock; /* * Next, dispatch requests in priority order. Ignore lower priority * requests if any higher priority requests are pending. */ for (prio = 0; prio <= DD_PRIO_MAX; prio++) { rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now); if (rq || dd_queued(dd, prio)) break; } unlock: spin_unlock(&dd->lock); return rq; } /* * Called by __blk_mq_alloc_request(). The shallow_depth value set by this * function is used by __blk_mq_get_tag(). 
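* Synchronous reads are never throttled; asynchronous requests and writes are
* limited to async_depth tags, which defaults to nr_requests (see
* dd_depth_updated()) and can be lowered through the async_depth sysfs
* attribute.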
*/ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) { struct deadline_data *dd = data->q->elevator->elevator_data; /* Do not throttle synchronous reads. */ if (op_is_sync(opf) && !op_is_write(opf)) return; /* * Throttle asynchronous requests and writes such that these requests * do not block the allocation of synchronous requests. */ data->shallow_depth = dd->async_depth; } /* Called by blk_mq_update_nr_requests(). */ static void dd_depth_updated(struct request_queue *q) { struct deadline_data *dd = q->elevator->elevator_data; dd->async_depth = q->nr_requests; blk_mq_set_min_shallow_depth(q, 1); } static void dd_exit_sched(struct elevator_queue *e) { struct deadline_data *dd = e->elevator_data; enum dd_prio prio; for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; const struct io_stats_per_prio *stats = &per_prio->stats; uint32_t queued; WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); spin_lock(&dd->lock); queued = dd_queued(dd, prio); spin_unlock(&dd->lock); WARN_ONCE(queued != 0, "statistics for priority %d: i %u m %u d %u c %u\n", prio, stats->inserted, stats->merged, stats->dispatched, atomic_read(&stats->completed)); } kfree(dd); } /* * initialize elevator private data (deadline_data). */ static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq) { struct deadline_data *dd; enum dd_prio prio; dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); if (!dd) return -ENOMEM; eq->elevator_data = dd; INIT_LIST_HEAD(&dd->dispatch); for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]); INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]); per_prio->sort_list[DD_READ] = RB_ROOT; per_prio->sort_list[DD_WRITE] = RB_ROOT; } dd->fifo_expire[DD_READ] = read_expire; dd->fifo_expire[DD_WRITE] = write_expire; dd->writes_starved = writes_starved; dd->front_merges = 1; dd->last_dir = DD_WRITE; dd->fifo_batch = fifo_batch; dd->prio_aging_expire = prio_aging_expire; spin_lock_init(&dd->lock); /* We dispatch from request queue wide instead of hw queue */ blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q); q->elevator = eq; dd_depth_updated(q); return 0; } /* * Try to merge @bio into an existing request. If @bio has been merged into * an existing request, store the pointer to that request into *@rq. */ static int dd_request_merge(struct request_queue *q, struct request **rq, struct bio *bio) { struct deadline_data *dd = q->elevator->elevator_data; const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; struct dd_per_prio *per_prio = &dd->per_prio[prio]; sector_t sector = bio_end_sector(bio); struct request *__rq; if (!dd->front_merges) return ELEVATOR_NO_MERGE; __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector); if (__rq) { BUG_ON(sector != blk_rq_pos(__rq)); if (elv_bio_merge_ok(__rq, bio)) { *rq = __rq; if (blk_discard_mergable(__rq)) return ELEVATOR_DISCARD_MERGE; return ELEVATOR_FRONT_MERGE; } } return ELEVATOR_NO_MERGE; } /* * Attempt to merge a bio into an existing request. This function is called * before @bio is associated with a request. 
*/ static bool dd_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { struct deadline_data *dd = q->elevator->elevator_data; struct request *free = NULL; bool ret; spin_lock(&dd->lock); ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); spin_unlock(&dd->lock); if (free) blk_mq_free_request(free); return ret; } /* * add rq to rbtree and fifo */ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_insert_t flags, struct list_head *free) { struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; const enum dd_data_dir data_dir = rq_data_dir(rq); u16 ioprio = req_get_ioprio(rq); u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); struct dd_per_prio *per_prio; enum dd_prio prio; lockdep_assert_held(&dd->lock); prio = ioprio_class_to_prio[ioprio_class]; per_prio = &dd->per_prio[prio]; if (!rq->elv.priv[0]) per_prio->stats.inserted++; rq->elv.priv[0] = per_prio; if (blk_mq_sched_try_insert_merge(q, rq, free)) return; trace_block_rq_insert(rq); if (flags & BLK_MQ_INSERT_AT_HEAD) { list_add(&rq->queuelist, &dd->dispatch); rq->fifo_time = jiffies; } else { deadline_add_rq_rb(per_prio, rq); if (rq_mergeable(rq)) { elv_rqhash_add(q, rq); if (!q->last_merge) q->last_merge = rq; } /* * set expire time and add to fifo list */ rq->fifo_time = jiffies + dd->fifo_expire[data_dir]; list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]); } } /* * Called from blk_mq_insert_request() or blk_mq_dispatch_list(). */ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *list, blk_insert_t flags) { struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; LIST_HEAD(free); spin_lock(&dd->lock); while (!list_empty(list)) { struct request *rq; rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); dd_insert_request(hctx, rq, flags, &free); } spin_unlock(&dd->lock); blk_mq_free_requests(&free); } /* Callback from inside blk_mq_rq_ctx_init(). */ static void dd_prepare_request(struct request *rq) { rq->elv.priv[0] = NULL; } /* * Callback from inside blk_mq_free_request(). */ static void dd_finish_request(struct request *rq) { struct dd_per_prio *per_prio = rq->elv.priv[0]; /* * The block layer core may call dd_finish_request() without having * called dd_insert_requests(). Skip requests that bypassed I/O * scheduling. See also blk_mq_request_bypass_insert(). 
*/ if (per_prio) atomic_inc(&per_prio->stats.completed); } static bool dd_has_work_for_prio(struct dd_per_prio *per_prio) { return !list_empty_careful(&per_prio->fifo_list[DD_READ]) || !list_empty_careful(&per_prio->fifo_list[DD_WRITE]); } static bool dd_has_work(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; enum dd_prio prio; if (!list_empty_careful(&dd->dispatch)) return true; for (prio = 0; prio <= DD_PRIO_MAX; prio++) if (dd_has_work_for_prio(&dd->per_prio[prio])) return true; return false; } /* * sysfs parts below */ #define SHOW_INT(__FUNC, __VAR) \ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct deadline_data *dd = e->elevator_data; \ \ return sysfs_emit(page, "%d\n", __VAR); \ } #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR)) SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire); SHOW_INT(deadline_writes_starved_show, dd->writes_starved); SHOW_INT(deadline_front_merges_show, dd->front_merges); SHOW_INT(deadline_async_depth_show, dd->async_depth); SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch); #undef SHOW_INT #undef SHOW_JIFFIES #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct deadline_data *dd = e->elevator_data; \ int __data, __ret; \ \ __ret = kstrtoint(page, 0, &__data); \ if (__ret < 0) \ return __ret; \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ *(__PTR) = __CONV(__data); \ return count; \ } #define STORE_INT(__FUNC, __PTR, MIN, MAX) \ STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, ) #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \ STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX); STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX); STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX); #undef STORE_FUNCTION #undef STORE_INT #undef STORE_JIFFIES #define DD_ATTR(name) \ __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store) static const struct elv_fs_entry deadline_attrs[] = { DD_ATTR(read_expire), DD_ATTR(write_expire), DD_ATTR(writes_starved), DD_ATTR(front_merges), DD_ATTR(async_depth), DD_ATTR(fifo_batch), DD_ATTR(prio_aging_expire), __ATTR_NULL }; #ifdef CONFIG_BLK_DEBUG_FS #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \ static void *deadline_##name##_fifo_start(struct seq_file *m, \ loff_t *pos) \ __acquires(&dd->lock) \ { \ struct request_queue *q = m->private; \ struct deadline_data *dd = q->elevator->elevator_data; \ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ \ spin_lock(&dd->lock); \ return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \ } \ \ static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \ loff_t *pos) \ { \ struct request_queue *q = m->private; \ struct deadline_data *dd = q->elevator->elevator_data; \ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ \ return seq_list_next(v, 
&per_prio->fifo_list[data_dir], pos); \ } \ \ static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \ __releases(&dd->lock) \ { \ struct request_queue *q = m->private; \ struct deadline_data *dd = q->elevator->elevator_data; \ \ spin_unlock(&dd->lock); \ } \ \ static const struct seq_operations deadline_##name##_fifo_seq_ops = { \ .start = deadline_##name##_fifo_start, \ .next = deadline_##name##_fifo_next, \ .stop = deadline_##name##_fifo_stop, \ .show = blk_mq_debugfs_rq_show, \ }; \ \ static int deadline_##name##_next_rq_show(void *data, \ struct seq_file *m) \ { \ struct request_queue *q = data; \ struct deadline_data *dd = q->elevator->elevator_data; \ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ struct request *rq; \ \ rq = deadline_from_pos(per_prio, data_dir, \ per_prio->latest_pos[data_dir]); \ if (rq) \ __blk_mq_debugfs_rq_show(m, rq); \ return 0; \ } DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0); DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0); DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1); DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1); DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2); DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2); #undef DEADLINE_DEBUGFS_DDIR_ATTRS static int deadline_batching_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; seq_printf(m, "%u\n", dd->batching); return 0; } static int deadline_starved_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; seq_printf(m, "%u\n", dd->starved); return 0; } static int dd_async_depth_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; seq_printf(m, "%u\n", dd->async_depth); return 0; } static int dd_queued_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; u32 rt, be, idle; spin_lock(&dd->lock); rt = dd_queued(dd, DD_RT_PRIO); be = dd_queued(dd, DD_BE_PRIO); idle = dd_queued(dd, DD_IDLE_PRIO); spin_unlock(&dd->lock); seq_printf(m, "%u %u %u\n", rt, be, idle); return 0; } /* Number of requests owned by the block driver for a given priority. 
*/ static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio) { const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; lockdep_assert_held(&dd->lock); return stats->dispatched + stats->merged - atomic_read(&stats->completed); } static int dd_owned_by_driver_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; u32 rt, be, idle; spin_lock(&dd->lock); rt = dd_owned_by_driver(dd, DD_RT_PRIO); be = dd_owned_by_driver(dd, DD_BE_PRIO); idle = dd_owned_by_driver(dd, DD_IDLE_PRIO); spin_unlock(&dd->lock); seq_printf(m, "%u %u %u\n", rt, be, idle); return 0; } static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos) __acquires(&dd->lock) { struct request_queue *q = m->private; struct deadline_data *dd = q->elevator->elevator_data; spin_lock(&dd->lock); return seq_list_start(&dd->dispatch, *pos); } static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos) { struct request_queue *q = m->private; struct deadline_data *dd = q->elevator->elevator_data; return seq_list_next(v, &dd->dispatch, pos); } static void deadline_dispatch_stop(struct seq_file *m, void *v) __releases(&dd->lock) { struct request_queue *q = m->private; struct deadline_data *dd = q->elevator->elevator_data; spin_unlock(&dd->lock); } static const struct seq_operations deadline_dispatch_seq_ops = { .start = deadline_dispatch_start, .next = deadline_dispatch_next, .stop = deadline_dispatch_stop, .show = blk_mq_debugfs_rq_show, }; #define DEADLINE_QUEUE_DDIR_ATTRS(name) \ {#name "_fifo_list", 0400, \ .seq_ops = &deadline_##name##_fifo_seq_ops} #define DEADLINE_NEXT_RQ_ATTR(name) \ {#name "_next_rq", 0400, deadline_##name##_next_rq_show} static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = { DEADLINE_QUEUE_DDIR_ATTRS(read0), DEADLINE_QUEUE_DDIR_ATTRS(write0), DEADLINE_QUEUE_DDIR_ATTRS(read1), DEADLINE_QUEUE_DDIR_ATTRS(write1), DEADLINE_QUEUE_DDIR_ATTRS(read2), DEADLINE_QUEUE_DDIR_ATTRS(write2), DEADLINE_NEXT_RQ_ATTR(read0), DEADLINE_NEXT_RQ_ATTR(write0), DEADLINE_NEXT_RQ_ATTR(read1), DEADLINE_NEXT_RQ_ATTR(write1), DEADLINE_NEXT_RQ_ATTR(read2), DEADLINE_NEXT_RQ_ATTR(write2), {"batching", 0400, deadline_batching_show}, {"starved", 0400, deadline_starved_show}, {"async_depth", 0400, dd_async_depth_show}, {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops}, {"owned_by_driver", 0400, dd_owned_by_driver_show}, {"queued", 0400, dd_queued_show}, {}, }; #undef DEADLINE_QUEUE_DDIR_ATTRS #endif static struct elevator_type mq_deadline = { .ops = { .depth_updated = dd_depth_updated, .limit_depth = dd_limit_depth, .insert_requests = dd_insert_requests, .dispatch_request = dd_dispatch_request, .prepare_request = dd_prepare_request, .finish_request = dd_finish_request, .next_request = elv_rb_latter_request, .former_request = elv_rb_former_request, .bio_merge = dd_bio_merge, .request_merge = dd_request_merge, .requests_merged = dd_merged_requests, .request_merged = dd_request_merged, .has_work = dd_has_work, .init_sched = dd_init_sched, .exit_sched = dd_exit_sched, }, #ifdef CONFIG_BLK_DEBUG_FS .queue_debugfs_attrs = deadline_queue_debugfs_attrs, #endif .elevator_attrs = deadline_attrs, .elevator_name = "mq-deadline", .elevator_alias = "deadline", .elevator_owner = THIS_MODULE, }; MODULE_ALIAS("mq-deadline-iosched"); static int __init deadline_init(void) { return elv_register(&mq_deadline); } static void __exit deadline_exit(void) { elv_unregister(&mq_deadline); } module_init(deadline_init); 
module_exit(deadline_exit); MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MQ deadline IO scheduler");
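The elv_fs_entry attributes declared above (read_expire, write_expire, writes_starved, front_merges, async_depth, fifo_batch and prio_aging_expire) appear under the disk's queue/iosched/ directory once mq-deadline is the active elevator, and the STORE_JIFFIES variants take their input in milliseconds and convert it with msecs_to_jiffies(). A minimal userspace sketch of tuning one of them follows; the device name "sda", the helper write_attr() and the 200 ms value are illustrative assumptions, not part of the scheduler code.

/*
 * Illustrative userspace sketch (not part of the kernel sources above):
 * select mq-deadline for one disk and raise its read FIFO deadline.
 * "sda" and the 200 ms value are placeholder assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* Make mq-deadline the active elevator for the disk. */
	if (write_attr("/sys/block/sda/queue/scheduler", "mq-deadline"))
		return EXIT_FAILURE;

	/*
	 * Parsed by deadline_read_expire_store() above; the value is given
	 * in milliseconds and stored in jiffies via msecs_to_jiffies().
	 */
	if (write_attr("/sys/block/sda/queue/iosched/read_expire", "200"))
		return EXIT_FAILURE;

	return EXIT_SUCCESS;
}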
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include "htc.h" static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd) { switch (wmi_cmd) { case WMI_ECHO_CMDID: return "WMI_ECHO_CMDID"; case WMI_ACCESS_MEMORY_CMDID: return "WMI_ACCESS_MEMORY_CMDID"; case WMI_GET_FW_VERSION: return "WMI_GET_FW_VERSION"; case WMI_DISABLE_INTR_CMDID: return "WMI_DISABLE_INTR_CMDID"; case WMI_ENABLE_INTR_CMDID: return "WMI_ENABLE_INTR_CMDID"; case WMI_ATH_INIT_CMDID: return "WMI_ATH_INIT_CMDID"; case WMI_ABORT_TXQ_CMDID: return "WMI_ABORT_TXQ_CMDID"; case WMI_STOP_TX_DMA_CMDID: return "WMI_STOP_TX_DMA_CMDID"; case WMI_ABORT_TX_DMA_CMDID: return "WMI_ABORT_TX_DMA_CMDID"; case WMI_DRAIN_TXQ_CMDID: return "WMI_DRAIN_TXQ_CMDID"; case WMI_DRAIN_TXQ_ALL_CMDID: return "WMI_DRAIN_TXQ_ALL_CMDID"; case WMI_START_RECV_CMDID: return "WMI_START_RECV_CMDID"; case WMI_STOP_RECV_CMDID: return "WMI_STOP_RECV_CMDID"; case WMI_FLUSH_RECV_CMDID: return "WMI_FLUSH_RECV_CMDID"; case WMI_SET_MODE_CMDID: return "WMI_SET_MODE_CMDID"; case WMI_NODE_CREATE_CMDID: return "WMI_NODE_CREATE_CMDID"; case WMI_NODE_REMOVE_CMDID: return "WMI_NODE_REMOVE_CMDID"; case WMI_VAP_REMOVE_CMDID: return "WMI_VAP_REMOVE_CMDID"; case WMI_VAP_CREATE_CMDID: return "WMI_VAP_CREATE_CMDID"; case WMI_REG_READ_CMDID: return "WMI_REG_READ_CMDID"; case WMI_REG_WRITE_CMDID: return "WMI_REG_WRITE_CMDID"; case WMI_REG_RMW_CMDID: return "WMI_REG_RMW_CMDID"; case WMI_RC_STATE_CHANGE_CMDID: return "WMI_RC_STATE_CHANGE_CMDID"; case WMI_RC_RATE_UPDATE_CMDID: return "WMI_RC_RATE_UPDATE_CMDID"; case WMI_TARGET_IC_UPDATE_CMDID: return "WMI_TARGET_IC_UPDATE_CMDID"; case WMI_TX_AGGR_ENABLE_CMDID: return "WMI_TX_AGGR_ENABLE_CMDID"; case WMI_TGT_DETACH_CMDID: return "WMI_TGT_DETACH_CMDID"; case WMI_NODE_UPDATE_CMDID: return "WMI_NODE_UPDATE_CMDID"; case WMI_INT_STATS_CMDID: return "WMI_INT_STATS_CMDID"; case WMI_TX_STATS_CMDID: return "WMI_TX_STATS_CMDID"; case WMI_RX_STATS_CMDID: return "WMI_RX_STATS_CMDID"; case WMI_BITRATE_MASK_CMDID: return "WMI_BITRATE_MASK_CMDID"; } return "Bogus"; } struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) { struct wmi *wmi; wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL); if (!wmi) return NULL; wmi->drv_priv = priv; wmi->stopped = false; skb_queue_head_init(&wmi->wmi_event_queue); spin_lock_init(&wmi->wmi_lock); spin_lock_init(&wmi->event_lock); mutex_init(&wmi->op_mutex); mutex_init(&wmi->multi_write_mutex); mutex_init(&wmi->multi_rmw_mutex); init_completion(&wmi->cmd_wait); INIT_LIST_HEAD(&wmi->pending_tx_events); tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet); return wmi; } void ath9k_stop_wmi(struct ath9k_htc_priv *priv) { struct wmi *wmi = priv->wmi; mutex_lock(&wmi->op_mutex); wmi->stopped = true; mutex_unlock(&wmi->op_mutex); } void ath9k_destroy_wmi(struct ath9k_htc_priv *priv) { kfree(priv->wmi); } void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv) { unsigned long flags; tasklet_kill(&priv->wmi->wmi_event_tasklet); spin_lock_irqsave(&priv->wmi->wmi_lock, flags); __skb_queue_purge(&priv->wmi->wmi_event_queue); spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags); } void ath9k_wmi_event_tasklet(struct tasklet_struct *t) { struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet); struct ath9k_htc_priv *priv = wmi->drv_priv; struct wmi_cmd_hdr *hdr; void *wmi_event; struct wmi_event_swba *swba; struct sk_buff *skb = NULL; unsigned long flags; u16 cmd_id; do { spin_lock_irqsave(&wmi->wmi_lock, flags); skb = __skb_dequeue(&wmi->wmi_event_queue); if (!skb) { spin_unlock_irqrestore(&wmi->wmi_lock, flags); return; } spin_unlock_irqrestore(&wmi->wmi_lock, 
flags); /* Check if ath9k_htc_probe_device() completed. */ if (!data_race(priv->initialized)) { kfree_skb(skb); continue; } hdr = (struct wmi_cmd_hdr *) skb->data; cmd_id = be16_to_cpu(hdr->command_id); wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr)); switch (cmd_id) { case WMI_SWBA_EVENTID: swba = wmi_event; ath9k_htc_swba(priv, swba); break; case WMI_FATAL_EVENTID: ieee80211_queue_work(wmi->drv_priv->hw, &wmi->drv_priv->fatal_work); break; case WMI_TXSTATUS_EVENTID: spin_lock_bh(&priv->tx.tx_lock); if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { spin_unlock_bh(&priv->tx.tx_lock); break; } spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_txstatus(priv, wmi_event); break; default: break; } kfree_skb(skb); } while (1); } void ath9k_fatal_work(struct work_struct *work) { struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, fatal_work); struct ath_common *common = ath9k_hw_common(priv->ah); ath_dbg(common, FATAL, "FATAL Event received, resetting device\n"); ath9k_htc_reset(priv); } static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb) { skb_pull(skb, sizeof(struct wmi_cmd_hdr)); if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0) memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len); complete(&wmi->cmd_wait); } static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, enum htc_endpoint_id epid) { struct wmi *wmi = priv; struct wmi_cmd_hdr *hdr; unsigned long flags; u16 cmd_id; if (unlikely(wmi->stopped)) goto free_skb; /* Validate the obtained SKB. */ if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr))) goto free_skb; hdr = (struct wmi_cmd_hdr *) skb->data; cmd_id = be16_to_cpu(hdr->command_id); if (cmd_id & 0x1000) { spin_lock_irqsave(&wmi->wmi_lock, flags); __skb_queue_tail(&wmi->wmi_event_queue, skb); spin_unlock_irqrestore(&wmi->wmi_lock, flags); tasklet_schedule(&wmi->wmi_event_tasklet); return; } /* Check if there has been a timeout. 
*/ spin_lock_irqsave(&wmi->wmi_lock, flags); if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) { spin_unlock_irqrestore(&wmi->wmi_lock, flags); goto free_skb; } /* WMI command response */ ath9k_wmi_rsp_callback(wmi, skb); spin_unlock_irqrestore(&wmi->wmi_lock, flags); free_skb: kfree_skb(skb); } static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb, enum htc_endpoint_id epid, bool txok) { kfree_skb(skb); } int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, enum htc_endpoint_id *wmi_ctrl_epid) { struct htc_service_connreq connect; int ret; wmi->htc = htc; memset(&connect, 0, sizeof(connect)); connect.ep_callbacks.priv = wmi; connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx; connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx; connect.service_id = WMI_CONTROL_SVC; ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid); if (ret) return ret; *wmi_ctrl_epid = wmi->ctrl_epid; return 0; } static int ath9k_wmi_cmd_issue(struct wmi *wmi, struct sk_buff *skb, enum wmi_cmd_id cmd, u16 len, u8 *rsp_buf, u32 rsp_len) { struct wmi_cmd_hdr *hdr; unsigned long flags; hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr)); hdr->command_id = cpu_to_be16(cmd); hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id); spin_lock_irqsave(&wmi->wmi_lock, flags); /* record the rsp buffer and length */ wmi->cmd_rsp_buf = rsp_buf; wmi->cmd_rsp_len = rsp_len; wmi->last_seq_id = wmi->tx_seq_id; spin_unlock_irqrestore(&wmi->wmi_lock, flags); return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid); } int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, u8 *cmd_buf, u32 cmd_len, u8 *rsp_buf, u32 rsp_len, u32 timeout) { struct ath_hw *ah = wmi->drv_priv->ah; struct ath_common *common = ath9k_hw_common(ah); u16 headroom = sizeof(struct htc_frame_hdr) + sizeof(struct wmi_cmd_hdr); unsigned long time_left, flags; struct sk_buff *skb; int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) return 0; skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, headroom); if (cmd_len != 0 && cmd_buf != NULL) { skb_put_data(skb, cmd_buf, cmd_len); } mutex_lock(&wmi->op_mutex); /* check if wmi stopped flag is set */ if (unlikely(wmi->stopped)) { ret = -EPROTO; goto out; } ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len); if (ret) goto out; time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout); if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); spin_lock_irqsave(&wmi->wmi_lock, flags); wmi->last_seq_id = 0; spin_unlock_irqrestore(&wmi->wmi_lock, flags); mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; } mutex_unlock(&wmi->op_mutex); return 0; out: ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); kfree_skb(skb); return ret; } |
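To show how the synchronous path above is consumed, here is a minimal caller sketch built only on what this file exposes: a payload-less command issued through ath9k_wmi_cmd(), with the timeout given in jiffies because it is handed straight to wait_for_completion_timeout(). The helper name and its usage are assumptions made for illustration and presume the driver's own headers (htc.h) are in scope.

/*
 * Illustrative sketch, not driver code: send a synchronous WMI command
 * that carries no payload and expects no response payload.
 */
static int example_wmi_start_recv(struct ath9k_htc_priv *priv)
{
	int ret;

	/* One second, expressed in jiffies, as the completion timeout. */
	ret = ath9k_wmi_cmd(priv->wmi, WMI_START_RECV_CMDID,
			    NULL, 0, NULL, 0, HZ);
	/*
	 * On -ETIMEDOUT the command path has already cleared
	 * wmi->last_seq_id, so a late reply from the target is discarded
	 * by ath9k_wmi_ctrl_rx() instead of being matched to stale state.
	 */
	return ret;
}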
1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 | // SPDX-License-Identifier: GPL-2.0-only /* * scsi_sysfs.c * * SCSI sysfs interface routines. * * Created to pull SCSI mid layer sysfs routines into one file. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/bsg.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dh.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_devinfo.h> #include "scsi_priv.h" #include "scsi_logging.h" static const struct device_type scsi_dev_type; static const struct { enum scsi_device_state value; char *name; } sdev_states[] = { { SDEV_CREATED, "created" }, { SDEV_RUNNING, "running" }, { SDEV_CANCEL, "cancel" }, { SDEV_DEL, "deleted" }, { SDEV_QUIESCE, "quiesce" }, { SDEV_OFFLINE, "offline" }, { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, { SDEV_BLOCK, "blocked" }, { SDEV_CREATED_BLOCK, "created-blocked" }, }; const char *scsi_device_state_name(enum scsi_device_state state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { if (sdev_states[i].value == state) { name = sdev_states[i].name; break; } } return name; } static const struct { enum scsi_host_state value; char *name; } shost_states[] = { { SHOST_CREATED, "created" }, { SHOST_RUNNING, "running" }, { SHOST_CANCEL, "cancel" }, { SHOST_DEL, "deleted" }, { SHOST_RECOVERY, "recovery" }, { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, { SHOST_DEL_RECOVERY, "deleted/recovery", }, }; const char *scsi_host_state_name(enum scsi_host_state state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(shost_states); i++) { if (shost_states[i].value == state) { name = shost_states[i].name; break; } } return name; } #ifdef CONFIG_SCSI_DH static const struct { unsigned char value; char *name; } sdev_access_states[] = { { SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" }, { SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" }, { SCSI_ACCESS_STATE_STANDBY, "standby" }, { SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" }, { SCSI_ACCESS_STATE_LBA, "lba-dependent" }, { SCSI_ACCESS_STATE_OFFLINE, "offline" }, { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, }; static const char *scsi_access_state_name(unsigned char state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) { if (sdev_access_states[i].value == state) { name = sdev_access_states[i].name; break; } } return name; } #endif static int check_set(unsigned long long *val, char *src) { char *last; if (strcmp(src, "-") == 0) { *val = SCAN_WILD_CARD; } else { /* * Doesn't check for int overflow */ *val = simple_strtoull(src, &last, 0); if (*last != '\0') return 1; } return 0; } static int scsi_scan(struct Scsi_Host *shost, const char *str) { char s1[15], s2[15], s3[17], junk; unsigned long long channel, id, lun; int res; res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); if (res != 3) return -EINVAL; if (check_set(&channel, s1)) return -EINVAL; if (check_set(&id, s2)) return -EINVAL; if (check_set(&lun, s3)) return -EINVAL; if (shost->transportt->user_scan) res = shost->transportt->user_scan(shost, channel, id, lun); else res = scsi_scan_host_selected(shost, channel, id, lun, SCSI_SCAN_MANUAL); return res; } /* * shost_show_function: macro to create an attr function that can be used to * show a non-bit field. 
*/ #define shost_show_function(name, field, format_string) \ static ssize_t \ show_##name (struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev); \ return snprintf (buf, 20, format_string, shost->field); \ } /* * shost_rd_attr: macro to create a function and attribute variable for a * read only field. */ #define shost_rd_attr2(name, field, format_string) \ shost_show_function(name, field, format_string) \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); #define shost_rd_attr(field, format_string) \ shost_rd_attr2(field, field, format_string) /* * Create the actual show/store functions and data structures. */ static ssize_t store_scan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); int res; res = scsi_scan(shost, buf); if (res == 0) res = count; return res; }; static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); static ssize_t store_shost_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int i; struct Scsi_Host *shost = class_to_shost(dev); enum scsi_host_state state = 0; for (i = 0; i < ARRAY_SIZE(shost_states); i++) { const int len = strlen(shost_states[i].name); if (strncmp(shost_states[i].name, buf, len) == 0 && buf[len] == '\n') { state = shost_states[i].value; break; } } if (!state) return -EINVAL; if (scsi_host_set_state(shost, state)) return -EINVAL; return count; } static ssize_t show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); const char *name = scsi_host_state_name(shost->shost_state); if (!name) return -EINVAL; return snprintf(buf, 20, "%s\n", name); } /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ static struct device_attribute dev_attr_hstate = __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); static ssize_t show_shost_mode(unsigned int mode, char *buf) { ssize_t len = 0; if (mode & MODE_INITIATOR) len = sprintf(buf, "%s", "Initiator"); if (mode & MODE_TARGET) len += sprintf(buf + len, "%s%s", len ? 
", " : "", "Target"); len += sprintf(buf + len, "\n"); return len; } static ssize_t show_shost_supported_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); unsigned int supported_mode = shost->hostt->supported_mode; if (supported_mode == MODE_UNKNOWN) /* by default this should be initiator */ supported_mode = MODE_INITIATOR; return show_shost_mode(supported_mode, buf); } static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL); static ssize_t show_shost_active_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); if (shost->active_mode == MODE_UNKNOWN) return snprintf(buf, 20, "unknown\n"); else return show_shost_mode(shost->active_mode, buf); } static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL); static int check_reset_type(const char *str) { if (sysfs_streq(str, "adapter")) return SCSI_ADAPTER_RESET; else if (sysfs_streq(str, "firmware")) return SCSI_FIRMWARE_RESET; else return 0; } static ssize_t store_host_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); const struct scsi_host_template *sht = shost->hostt; int ret = -EINVAL; int type; type = check_reset_type(buf); if (!type) goto exit_store_host_reset; if (sht->host_reset) ret = sht->host_reset(shost, type); else ret = -EOPNOTSUPP; exit_store_host_reset: if (ret == 0) ret = count; return ret; } static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); static ssize_t show_shost_eh_deadline(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); if (shost->eh_deadline == -1) return snprintf(buf, strlen("off") + 2, "off\n"); return sprintf(buf, "%u\n", shost->eh_deadline / HZ); } static ssize_t store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); int ret = -EINVAL; unsigned long deadline, flags; if (shost->transportt && (shost->transportt->eh_strategy_handler || !shost->hostt->eh_host_reset_handler)) return ret; if (!strncmp(buf, "off", strlen("off"))) deadline = -1; else { ret = kstrtoul(buf, 10, &deadline); if (ret) return ret; if (deadline * HZ > UINT_MAX) return -EINVAL; } spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_in_recovery(shost)) ret = -EBUSY; else { if (deadline == -1) shost->eh_deadline = -1; else shost->eh_deadline = deadline * HZ; ret = count; } spin_unlock_irqrestore(shost->host_lock, flags); return ret; } static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(cmd_per_lun, "%hd\n"); shost_rd_attr(can_queue, "%d\n"); shost_rd_attr(sg_tablesize, "%hu\n"); shost_rd_attr(sg_prot_tablesize, "%hu\n"); shost_rd_attr(prot_capabilities, "%u\n"); shost_rd_attr(prot_guard_type, "%hd\n"); shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); static ssize_t show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); return snprintf(buf, 20, "%d\n", scsi_host_busy(shost)); } static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); static ssize_t show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "1\n"); } static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL); static ssize_t 
show_nr_hw_queues(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct blk_mq_tag_set *tag_set = &shost->tag_set; return snprintf(buf, 20, "%d\n", tag_set->nr_hw_queues); } static DEVICE_ATTR(nr_hw_queues, S_IRUGO, show_nr_hw_queues, NULL); static struct attribute *scsi_sysfs_shost_attrs[] = { &dev_attr_use_blk_mq.attr, &dev_attr_unique_id.attr, &dev_attr_host_busy.attr, &dev_attr_cmd_per_lun.attr, &dev_attr_can_queue.attr, &dev_attr_sg_tablesize.attr, &dev_attr_sg_prot_tablesize.attr, &dev_attr_proc_name.attr, &dev_attr_scan.attr, &dev_attr_hstate.attr, &dev_attr_supported_mode.attr, &dev_attr_active_mode.attr, &dev_attr_prot_capabilities.attr, &dev_attr_prot_guard_type.attr, &dev_attr_host_reset.attr, &dev_attr_eh_deadline.attr, &dev_attr_nr_hw_queues.attr, NULL }; static const struct attribute_group scsi_shost_attr_group = { .attrs = scsi_sysfs_shost_attrs, }; const struct attribute_group *scsi_shost_groups[] = { &scsi_shost_attr_group, NULL }; static void scsi_device_cls_release(struct device *class_dev) { struct scsi_device *sdev; sdev = class_to_sdev(class_dev); put_device(&sdev->sdev_gendev); } static void scsi_device_dev_release(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct device *parent; struct list_head *this, *tmp; struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL; struct scsi_vpd *vpd_pgb7 = NULL; unsigned long flags; might_sleep(); scsi_dh_release_device(sdev); parent = sdev->sdev_gendev.parent; spin_lock_irqsave(sdev->host->host_lock, flags); list_del(&sdev->siblings); list_del(&sdev->same_target_siblings); list_del(&sdev->starved_entry); spin_unlock_irqrestore(sdev->host->host_lock, flags); cancel_work_sync(&sdev->event_work); list_for_each_safe(this, tmp, &sdev->event_list) { struct scsi_event *evt; evt = list_entry(this, struct scsi_event, node); list_del(&evt->node); kfree(evt); } blk_put_queue(sdev->request_queue); /* NULL queue means the device can't be used */ sdev->request_queue = NULL; sbitmap_free(&sdev->budget_map); mutex_lock(&sdev->inquiry_mutex); vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb7 = rcu_replace_pointer(sdev->vpd_pgb7, vpd_pgb7, lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); if (vpd_pg0) kfree_rcu(vpd_pg0, rcu); if (vpd_pg83) kfree_rcu(vpd_pg83, rcu); if (vpd_pg80) kfree_rcu(vpd_pg80, rcu); if (vpd_pg89) kfree_rcu(vpd_pg89, rcu); if (vpd_pgb0) kfree_rcu(vpd_pgb0, rcu); if (vpd_pgb1) kfree_rcu(vpd_pgb1, rcu); if (vpd_pgb2) kfree_rcu(vpd_pgb2, rcu); if (vpd_pgb7) kfree_rcu(vpd_pgb7, rcu); kfree(sdev->inquiry); kfree(sdev); if (parent) put_device(parent); } static struct class sdev_class = { .name = "scsi_device", .dev_release = scsi_device_cls_release, }; /* all probing is done in the 
individual ->probe routines */ static int scsi_bus_match(struct device *dev, const struct device_driver *gendrv) { struct scsi_device *sdp; if (dev->type != &scsi_dev_type) return 0; sdp = to_scsi_device(dev); if (sdp->no_uld_attach) return 0; return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; } static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct scsi_device *sdev; if (dev->type != &scsi_dev_type) return 0; sdev = to_scsi_device(dev); add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); return 0; } const struct bus_type scsi_bus_type = { .name = "scsi", .match = scsi_bus_match, .uevent = scsi_bus_uevent, #ifdef CONFIG_PM .pm = &scsi_bus_pm_ops, #endif }; int scsi_sysfs_register(void) { int error; error = bus_register(&scsi_bus_type); if (!error) { error = class_register(&sdev_class); if (error) bus_unregister(&scsi_bus_type); } return error; } void scsi_sysfs_unregister(void) { class_unregister(&sdev_class); bus_unregister(&scsi_bus_type); } /* * sdev_show_function: macro to create an attr function that can be used to * show a non-bit field. */ #define sdev_show_function(field, format_string) \ static ssize_t \ sdev_show_##field (struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev; \ sdev = to_scsi_device(dev); \ return snprintf (buf, 20, format_string, sdev->field); \ } \ /* * sdev_rd_attr: macro to create a function and attribute variable for a * read only field. */ #define sdev_rd_attr(field, format_string) \ sdev_show_function(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); /* * Create the actual show/store functions and data structures. */ sdev_rd_attr (type, "%d\n"); sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (vendor, "%.8s\n"); sdev_rd_attr (model, "%.16s\n"); sdev_rd_attr (rev, "%.4s\n"); sdev_rd_attr (cdl_supported, "%d\n"); static ssize_t sdev_show_device_busy(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev)); } static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); static ssize_t sdev_show_device_blocked(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked)); } static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL); /* * TODO: can we make these symlinks to the block layer ones? 
*/ static ssize_t sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); } static ssize_t sdev_store_timeout (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); int ret, timeout; ret = kstrtoint(buf, 0, &timeout); if (ret) return ret; if (timeout <= 0) return -EINVAL; blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); return count; } static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); static ssize_t sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); } static ssize_t sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev; unsigned int eh_timeout; int err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; sdev = to_scsi_device(dev); err = kstrtouint(buf, 10, &eh_timeout); if (err) return err; sdev->eh_timeout = eh_timeout * HZ; return count; } static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); static ssize_t store_rescan_field (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_rescan_device(to_scsi_device(dev)); return count; } static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); static ssize_t sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kernfs_node *kn; struct scsi_device *sdev = to_scsi_device(dev); /* * We need to try to get module, avoiding the module been removed * during delete. */ if (scsi_device_get(sdev)) return -ENODEV; kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); WARN_ON_ONCE(!kn); /* * Concurrent writes into the "delete" sysfs attribute may trigger * concurrent calls to device_remove_file() and scsi_remove_device(). * device_remove_file() handles concurrent removal calls by * serializing these and by ignoring the second and later removal * attempts. Concurrent calls of scsi_remove_device() are * serialized. The second and later calls of scsi_remove_device() are * ignored because the first call of that function changes the device * state into SDEV_DEL. 
*/ device_remove_file(dev, attr); scsi_remove_device(sdev); if (kn) sysfs_unbreak_active_protection(kn); scsi_device_put(sdev); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); static ssize_t store_state_field(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int i, ret; struct scsi_device *sdev = to_scsi_device(dev); enum scsi_device_state state = 0; bool rescan_dev = false; for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { const int len = strlen(sdev_states[i].name); if (strncmp(sdev_states[i].name, buf, len) == 0 && buf[len] == '\n') { state = sdev_states[i].value; break; } } switch (state) { case SDEV_RUNNING: case SDEV_OFFLINE: break; default: return -EINVAL; } mutex_lock(&sdev->state_mutex); switch (sdev->sdev_state) { case SDEV_RUNNING: case SDEV_OFFLINE: break; default: mutex_unlock(&sdev->state_mutex); return -EINVAL; } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { ret = scsi_device_set_state(sdev, state); if (ret == 0 && state == SDEV_RUNNING) rescan_dev = true; } mutex_unlock(&sdev->state_mutex); if (rescan_dev) { /* * If the device state changes to SDEV_RUNNING, we need to * run the queue to avoid I/O hang, and rescan the device * to revalidate it. Running the queue first is necessary * because another thread may be waiting inside * blk_mq_freeze_queue_wait() and because that call may be * waiting for pending I/O to finish. */ blk_mq_run_hw_queues(sdev->request_queue, true); scsi_rescan_device(sdev); } return ret == 0 ? count : -EINVAL; } static ssize_t show_state_field(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); const char *name = scsi_device_state_name(sdev->sdev_state); if (!name) return -EINVAL; return snprintf(buf, 20, "%s\n", name); } static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); static ssize_t show_queue_type_field(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); const char *name = "none"; if (sdev->simple_tags) name = "simple"; return snprintf(buf, 20, "%s\n", name); } static ssize_t store_queue_type_field(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->tagged_supported) return -EINVAL; sdev_printk(KERN_INFO, sdev, "ignoring write to deprecated queue_type attribute"); return count; } static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, store_queue_type_field); #define sdev_vpd_pg_attr(_page) \ static ssize_t \ show_vpd_##_page(struct file *filp, struct kobject *kobj, \ const struct bin_attribute *bin_attr, \ char *buf, loff_t off, size_t count) \ { \ struct device *dev = kobj_to_dev(kobj); \ struct scsi_device *sdev = to_scsi_device(dev); \ struct scsi_vpd *vpd_page; \ int ret = -EINVAL; \ \ rcu_read_lock(); \ vpd_page = rcu_dereference(sdev->vpd_##_page); \ if (vpd_page) \ ret = memory_read_from_buffer(buf, count, &off, \ vpd_page->data, vpd_page->len); \ rcu_read_unlock(); \ return ret; \ } \ static const struct bin_attribute dev_attr_vpd_##_page = { \ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ .size = 0, \ .read = show_vpd_##_page, \ }; sdev_vpd_pg_attr(pg83); sdev_vpd_pg_attr(pg80); sdev_vpd_pg_attr(pg89); sdev_vpd_pg_attr(pgb0); sdev_vpd_pg_attr(pgb1); sdev_vpd_pg_attr(pgb2); sdev_vpd_pg_attr(pgb7); sdev_vpd_pg_attr(pg0); static ssize_t show_inquiry(struct file *filep, 
struct kobject *kobj, const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->inquiry) return -EINVAL; return memory_read_from_buffer(buf, count, &off, sdev->inquiry, sdev->inquiry_len); } static const struct bin_attribute dev_attr_inquiry = { .attr = { .name = "inquiry", .mode = S_IRUGO, }, .size = 0, .read = show_inquiry, }; static ssize_t show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); } static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); #define show_sdev_iostat(field) \ static ssize_t \ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ unsigned long long count = atomic_read(&sdev->field); \ return snprintf(buf, 20, "0x%llx\n", count); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) show_sdev_iostat(iorequest_cnt); show_sdev_iostat(iodone_cnt); show_sdev_iostat(ioerr_cnt); show_sdev_iostat(iotmo_cnt); static ssize_t sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type); } static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); #define DECLARE_EVT_SHOW(name, Cap_name) \ static ssize_t \ sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\ return snprintf(buf, 20, "%d\n", val); \ } #define DECLARE_EVT_STORE(name, Cap_name) \ static ssize_t \ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ const char *buf, size_t count) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ int val = simple_strtoul(buf, NULL, 0); \ if (val == 0) \ clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ else if (val == 1) \ set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ else \ return -EINVAL; \ return count; \ } #define DECLARE_EVT(name, Cap_name) \ DECLARE_EVT_SHOW(name, Cap_name) \ DECLARE_EVT_STORE(name, Cap_name) \ static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \ sdev_store_evt_##name); #define REF_EVT(name) &dev_attr_evt_##name.attr DECLARE_EVT(media_change, MEDIA_CHANGE) DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) static ssize_t sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int depth, retval; struct scsi_device *sdev = to_scsi_device(dev); const struct scsi_host_template *sht = sdev->host->hostt; if (!sht->change_queue_depth) return -EINVAL; depth = simple_strtoul(buf, NULL, 0); if (depth < 1 || depth > sdev->host->can_queue) return -EINVAL; retval = sht->change_queue_depth(sdev, depth); if (retval < 0) return retval; sdev->max_queue_depth = sdev->queue_depth; return count; } sdev_show_function(queue_depth, "%d\n"); static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, sdev_store_queue_depth); static ssize_t sdev_show_wwid(struct device 
*dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); ssize_t count; count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE); if (count > 0) { buf[count] = '\n'; count++; } return count; } static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); #define BLIST_FLAG_NAME(name) \ [const_ilog2((__force __u64)BLIST_##name)] = #name static const char *const sdev_bflags_name[] = { #include "scsi_devinfo_tbl.c" }; #undef BLIST_FLAG_NAME static ssize_t sdev_show_blacklist(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); int i; ssize_t len = 0; for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) { const char *name = NULL; if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i))) continue; if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i]) name = sdev_bflags_name[i]; if (name) len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s", len ? " " : "", name); else len += scnprintf(buf + len, PAGE_SIZE - len, "%sINVALID_BIT(%d)", len ? " " : "", i); } if (len) len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); return len; } static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL); #ifdef CONFIG_SCSI_DH static ssize_t sdev_show_dh_state(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->handler) return snprintf(buf, 20, "detached\n"); return snprintf(buf, 20, "%s\n", sdev->handler->name); } static ssize_t sdev_store_dh_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); int err = -EINVAL; if (sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) return -ENODEV; if (!sdev->handler) { /* * Attach to a device handler */ err = scsi_dh_attach(sdev->request_queue, buf); } else if (!strncmp(buf, "activate", 8)) { /* * Activate a device handler */ if (sdev->handler->activate) err = sdev->handler->activate(sdev, NULL, NULL); else err = 0; } else if (!strncmp(buf, "detach", 6)) { /* * Detach from a device handler */ sdev_printk(KERN_WARNING, sdev, "can't detach handler %s.\n", sdev->handler->name); err = -EINVAL; } return err < 0 ? err : count; } static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state, sdev_store_dh_state); static ssize_t sdev_show_access_state(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); unsigned char access_state; const char *access_state_name; if (!sdev->handler) return -EINVAL; access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK); access_state_name = scsi_access_state_name(access_state); return sprintf(buf, "%s\n", access_state_name ? 
access_state_name : "unknown"); } static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL); static ssize_t sdev_show_preferred_path(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->handler) return -EINVAL; if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL); #endif static ssize_t sdev_show_queue_ramp_up_period(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf(buf, 20, "%u\n", jiffies_to_msecs(sdev->queue_ramp_up_period)); } static ssize_t sdev_store_queue_ramp_up_period(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); unsigned int period; if (kstrtouint(buf, 10, &period)) return -EINVAL; sdev->queue_ramp_up_period = msecs_to_jiffies(period); return count; } static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, sdev_show_queue_ramp_up_period, sdev_store_queue_ramp_up_period); static ssize_t sdev_show_cdl_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return sysfs_emit(buf, "%d\n", (int)sdev->cdl_enable); } static ssize_t sdev_store_cdl_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; bool v; if (kstrtobool(buf, &v)) return -EINVAL; ret = scsi_cdl_enable(to_scsi_device(dev), v); if (ret) return ret; return count; } static DEVICE_ATTR(cdl_enable, S_IRUGO | S_IWUSR, sdev_show_cdl_enable, sdev_store_cdl_enable); static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); if (attr == &dev_attr_queue_depth.attr && !sdev->host->hostt->change_queue_depth) return S_IRUGO; if (attr == &dev_attr_queue_ramp_up_period.attr && !sdev->host->hostt->change_queue_depth) return 0; return attr->mode; } static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, const struct bin_attribute *attr, int i) { struct device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0) return 0; if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) return 0; if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) return 0; if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89) return 0; if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0) return 0; if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1) return 0; if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2) return 0; if (attr == &dev_attr_vpd_pgb7 && !sdev->vpd_pgb7) return 0; return S_IRUGO; } /* Default template for device attributes. 
May NOT be modified */ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_device_blocked.attr, &dev_attr_type.attr, &dev_attr_scsi_level.attr, &dev_attr_device_busy.attr, &dev_attr_vendor.attr, &dev_attr_model.attr, &dev_attr_rev.attr, &dev_attr_rescan.attr, &dev_attr_delete.attr, &dev_attr_state.attr, &dev_attr_timeout.attr, &dev_attr_eh_timeout.attr, &dev_attr_iocounterbits.attr, &dev_attr_iorequest_cnt.attr, &dev_attr_iodone_cnt.attr, &dev_attr_ioerr_cnt.attr, &dev_attr_iotmo_cnt.attr, &dev_attr_modalias.attr, &dev_attr_queue_depth.attr, &dev_attr_queue_type.attr, &dev_attr_wwid.attr, &dev_attr_blacklist.attr, #ifdef CONFIG_SCSI_DH &dev_attr_dh_state.attr, &dev_attr_access_state.attr, &dev_attr_preferred_path.attr, #endif &dev_attr_queue_ramp_up_period.attr, &dev_attr_cdl_supported.attr, &dev_attr_cdl_enable.attr, REF_EVT(media_change), REF_EVT(inquiry_change_reported), REF_EVT(capacity_change_reported), REF_EVT(soft_threshold_reached), REF_EVT(mode_parameter_change_reported), REF_EVT(lun_change_reported), NULL }; static const struct bin_attribute *const scsi_sdev_bin_attrs[] = { &dev_attr_vpd_pg0, &dev_attr_vpd_pg83, &dev_attr_vpd_pg80, &dev_attr_vpd_pg89, &dev_attr_vpd_pgb0, &dev_attr_vpd_pgb1, &dev_attr_vpd_pgb2, &dev_attr_vpd_pgb7, &dev_attr_inquiry, NULL }; static struct attribute_group scsi_sdev_attr_group = { .attrs = scsi_sdev_attrs, .bin_attrs = scsi_sdev_bin_attrs, .is_visible = scsi_sdev_attr_is_visible, .is_bin_visible = scsi_sdev_bin_attr_is_visible, }; static const struct attribute_group *scsi_sdev_attr_groups[] = { &scsi_sdev_attr_group, NULL }; static int scsi_target_add(struct scsi_target *starget) { int error; if (starget->state != STARGET_CREATED) return 0; error = device_add(&starget->dev); if (error) { dev_err(&starget->dev, "target device_add failed, error %d\n", error); return error; } transport_add_device(&starget->dev); starget->state = STARGET_RUNNING; pm_runtime_set_active(&starget->dev); pm_runtime_enable(&starget->dev); device_enable_async_suspend(&starget->dev); return 0; } /** * scsi_sysfs_add_sdev - add scsi device to sysfs * @sdev: scsi_device to add * * Return value: * 0 on Success / non-zero on Failure **/ int scsi_sysfs_add_sdev(struct scsi_device *sdev) { int error; struct scsi_target *starget = sdev->sdev_target; if (WARN_ON_ONCE(scsi_device_is_pseudo_dev(sdev))) return -EINVAL; error = scsi_target_add(starget); if (error) return error; transport_configure_device(&starget->dev); device_enable_async_suspend(&sdev->sdev_gendev); scsi_autopm_get_target(starget); pm_runtime_set_active(&sdev->sdev_gendev); if (!sdev->rpm_autosuspend) pm_runtime_forbid(&sdev->sdev_gendev); pm_runtime_enable(&sdev->sdev_gendev); scsi_autopm_put_target(starget); scsi_autopm_get_device(sdev); scsi_dh_add_device(sdev); error = device_add(&sdev->sdev_gendev); if (error) { sdev_printk(KERN_INFO, sdev, "failed to add device: %d\n", error); return error; } device_enable_async_suspend(&sdev->sdev_dev); error = device_add(&sdev->sdev_dev); if (error) { sdev_printk(KERN_INFO, sdev, "failed to add class device: %d\n", error); device_del(&sdev->sdev_gendev); return error; } transport_add_device(&sdev->sdev_gendev); sdev->is_visible = 1; if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) { sdev->bsg_dev = scsi_bsg_register_queue(sdev); if (IS_ERR(sdev->bsg_dev)) { error = PTR_ERR(sdev->bsg_dev); sdev_printk(KERN_INFO, sdev, "Failed to register bsg queue, errno=%d\n", error); sdev->bsg_dev = NULL; } } scsi_autopm_put_device(sdev); return error; } void __scsi_remove_device(struct scsi_device 
*sdev) { struct device *dev = &sdev->sdev_gendev; int res; /* * This cleanup path is not reentrant and while it is impossible * to get a new reference with scsi_device_get() someone can still * hold a previously acquired one. */ if (sdev->sdev_state == SDEV_DEL) return; if (sdev->is_visible) { /* * If scsi_internal_target_block() is running concurrently, * wait until it has finished before changing the device state. */ mutex_lock(&sdev->state_mutex); /* * If blocked, we go straight to DEL and restart the queue so * any commands issued during driver shutdown (like sync * cache) are errored immediately. */ res = scsi_device_set_state(sdev, SDEV_CANCEL); if (res != 0) { res = scsi_device_set_state(sdev, SDEV_DEL); if (res == 0) scsi_start_queue(sdev); } mutex_unlock(&sdev->state_mutex); if (res != 0) return; if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev) bsg_unregister_queue(sdev->bsg_dev); device_unregister(&sdev->sdev_dev); transport_remove_device(dev); device_del(dev); } else put_device(&sdev->sdev_dev); /* * Stop accepting new requests and wait until all queuecommand() and * scsi_run_queue() invocations have finished before tearing down the * device. */ mutex_lock(&sdev->state_mutex); scsi_device_set_state(sdev, SDEV_DEL); mutex_unlock(&sdev->state_mutex); blk_mq_destroy_queue(sdev->request_queue); kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); cancel_work_sync(&sdev->requeue_work); if (!scsi_device_is_pseudo_dev(sdev) && sdev->host->hostt->sdev_destroy) sdev->host->hostt->sdev_destroy(sdev); transport_destroy_device(dev); /* * Paired with the kref_get() in scsi_sysfs_initialize(). We have * removed sysfs visibility from the device, so make the target * invisible if this was the last device underneath it. */ scsi_target_reap(scsi_target(sdev)); put_device(dev); } /** * scsi_remove_device - unregister a device from the scsi bus * @sdev: scsi_device to unregister **/ void scsi_remove_device(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; mutex_lock(&shost->scan_mutex); __scsi_remove_device(sdev); mutex_unlock(&shost->scan_mutex); } EXPORT_SYMBOL(scsi_remove_device); static void __scsi_remove_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; struct scsi_device *sdev; spin_lock_irqsave(shost->host_lock, flags); restart: list_for_each_entry(sdev, &shost->__devices, siblings) { /* * We cannot call scsi_device_get() here, as * we might've been called from rmmod() causing * scsi_device_get() to fail the module_is_live() * check. */ if (sdev->channel != starget->channel || sdev->id != starget->id) continue; if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL || !get_device(&sdev->sdev_gendev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); scsi_remove_device(sdev); put_device(&sdev->sdev_gendev); spin_lock_irqsave(shost->host_lock, flags); goto restart; } spin_unlock_irqrestore(shost->host_lock, flags); } /** * scsi_remove_target - try to remove a target and all its devices * @dev: generic starget or parent of generic stargets to be removed * * Note: This is slightly racy. It is possible that if the user * requests the addition of another device then the target won't be * removed. 
*/ void scsi_remove_target(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev->parent); struct scsi_target *starget; unsigned long flags; restart: spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL || starget->state == STARGET_REMOVE || starget->state == STARGET_CREATED_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); if (starget->state == STARGET_CREATED) starget->state = STARGET_CREATED_REMOVE; else starget->state = STARGET_REMOVE; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); goto restart; } } spin_unlock_irqrestore(shost->host_lock, flags); } EXPORT_SYMBOL(scsi_remove_target); int __scsi_register_driver(struct device_driver *drv, struct module *owner) { drv->bus = &scsi_bus_type; drv->owner = owner; return driver_register(drv); } EXPORT_SYMBOL(__scsi_register_driver); int scsi_register_interface(struct class_interface *intf) { intf->class = &sdev_class; return class_interface_register(intf); } EXPORT_SYMBOL(scsi_register_interface); /** * scsi_sysfs_add_host - add scsi host to subsystem * @shost: scsi host struct to add to subsystem **/ int scsi_sysfs_add_host(struct Scsi_Host *shost) { transport_register_device(&shost->shost_gendev); transport_configure_device(&shost->shost_gendev); return 0; } static const struct device_type scsi_dev_type = { .name = "scsi_device", .release = scsi_device_dev_release, .groups = scsi_sdev_attr_groups, }; void scsi_sysfs_device_initialize(struct scsi_device *sdev) { unsigned long flags; struct Scsi_Host *shost = sdev->host; const struct scsi_host_template *hostt = shost->hostt; struct scsi_target *starget = sdev->sdev_target; device_initialize(&sdev->sdev_gendev); sdev->sdev_gendev.bus = &scsi_bus_type; sdev->sdev_gendev.type = &scsi_dev_type; scsi_enable_async_suspend(&sdev->sdev_gendev); dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); sdev->sdev_gendev.groups = hostt->sdev_groups; device_initialize(&sdev->sdev_dev); sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); sdev->sdev_dev.class = &sdev_class; dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); /* * Get a default scsi_level from the target (derived from sibling * devices). This is the best we can do for guessing how to set * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the * setting doesn't matter, because all the bits are zero anyway. * But it does matter for higher LUNs. */ sdev->scsi_level = starget->scsi_level; if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN && !shost->no_scsi2_lun_in_cdb) sdev->lun_in_cdb = 1; transport_setup_device(&sdev->sdev_gendev); spin_lock_irqsave(shost->host_lock, flags); list_add_tail(&sdev->same_target_siblings, &starget->devices); list_add_tail(&sdev->siblings, &shost->__devices); spin_unlock_irqrestore(shost->host_lock, flags); /* * device can now only be removed via __scsi_remove_device() so hold * the target. 
Target will be held in CREATED state until something * beneath it becomes visible (in which case it moves to RUNNING) */ kref_get(&starget->reap_ref); } int scsi_is_sdev_device(const struct device *dev) { return dev->type == &scsi_dev_type; } EXPORT_SYMBOL(scsi_is_sdev_device); /* A blank transport template that is used in drivers that don't * yet implement Transport Attributes */ struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
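/*
 * Editor's illustration (not part of the kernel source above): the attribute
 * group built from scsi_sdev_attrs[] and scsi_sdev_bin_attrs[] is what shows
 * up in each SCSI device's sysfs directory once scsi_sysfs_add_sdev() has run.
 * A minimal userspace sketch of reading one of those attributes follows; the
 * device address 0:0:0:0 and the /sys/bus/scsi/devices layout are assumptions
 * made for the example, so substitute a real H:C:T:L address from your system.
 */
#include <stdio.h>

int main(void)
{
	/* queue_depth is one of the attributes in scsi_sdev_attrs[] above */
	const char *path = "/sys/bus/scsi/devices/0:0:0:0/queue_depth";
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("queue_depth of 0:0:0:0: %s", line);
	fclose(f);
	return 0;
}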
// SPDX-License-Identifier: GPL-2.0+ /* * USB Cypress M8 driver * * Copyright (C) 2004 * Lonnie Mendez (dignome@gmail.com) * Copyright (C) 2003,2004 * Neil Whelchel (koyama@firstlight.net) * * See Documentation/usb/usb-serial.rst for more information on using this * driver * * See http://geocities.com/i0xox0i for information on this driver and the * earthmate usb device. */ /* Thanks to Neil Whelchel for writing the first cypress m8 implementation for linux. */ /* Thanks to cypress for providing references for the hid reports. */ /* Thanks to Jiang Zhang for providing links and for general help.
*/ /* Code originates and was built up from ftdi_sio, belkin, pl2303 and others.*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/unaligned.h> #include "cypress_m8.h" static bool stats; static int interval; static bool unstable_bauds; #define DRIVER_AUTHOR "Lonnie Mendez <dignome@gmail.com>, Neil Whelchel <koyama@firstlight.net>" #define DRIVER_DESC "Cypress USB to Serial Driver" /* write buffer size defines */ #define CYPRESS_BUF_SIZE 1024 static const struct usb_device_id id_table_earthmate[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { } /* Terminating entry */ }; static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ }; static const struct usb_device_id id_table_nokiaca42v2[] = { { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); enum packet_format { packet_format_1, /* b0:status, b1:payload count */ packet_format_2 /* b0[7:3]:status, b0[2:0]:payload count */ }; struct cypress_private { spinlock_t lock; /* private lock */ int chiptype; /* identifier of device, for quirks/etc */ int bytes_in; /* used for statistics */ int bytes_out; /* used for statistics */ int cmd_count; /* used for statistics */ int cmd_ctrl; /* always set this to 1 before issuing a command */ struct kfifo write_fifo; /* write fifo */ int write_urb_in_use; /* write urb in use indicator */ int write_urb_interval; /* interval to use for write urb */ int read_urb_interval; /* interval to use for read urb */ int comm_is_ok; /* true if communication is (still) ok */ __u8 line_control; /* holds dtr / rts value */ __u8 current_status; /* received from last read - info on dsr,cts,cd,ri,etc */ __u8 current_config; /* stores the current configuration byte */ __u8 rx_flags; /* throttling - used from whiteheat/ftdi_sio */ enum packet_format pkt_fmt; /* format to use for packet send / receive */ int get_cfg_unsafe; /* If true, the CYPRESS_GET_CONFIG is unsafe */ int baud_rate; /* stores current baud rate in integer form */ char prev_status; /* used for TIOCMIWAIT */ }; /* function prototypes for the Cypress USB to serial device */ static int cypress_earthmate_port_probe(struct usb_serial_port *port); static int cypress_hidcom_port_probe(struct usb_serial_port *port); static int cypress_ca42v2_port_probe(struct usb_serial_port *port); static void cypress_port_remove(struct 
usb_serial_port *port); static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port); static void cypress_close(struct usb_serial_port *port); static void cypress_dtr_rts(struct usb_serial_port *port, int on); static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static void cypress_send(struct usb_serial_port *port); static unsigned int cypress_write_room(struct tty_struct *tty); static void cypress_earthmate_init_termios(struct tty_struct *tty); static void cypress_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int cypress_tiocmget(struct tty_struct *tty); static int cypress_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static unsigned int cypress_chars_in_buffer(struct tty_struct *tty); static void cypress_throttle(struct tty_struct *tty); static void cypress_unthrottle(struct tty_struct *tty); static void cypress_set_dead(struct usb_serial_port *port); static void cypress_read_int_callback(struct urb *urb); static void cypress_write_int_callback(struct urb *urb); static struct usb_serial_driver cypress_earthmate_device = { .driver = { .name = "earthmate", }, .description = "DeLorme Earthmate USB", .id_table = id_table_earthmate, .num_ports = 1, .port_probe = cypress_earthmate_port_probe, .port_remove = cypress_port_remove, .open = cypress_open, .close = cypress_close, .dtr_rts = cypress_dtr_rts, .write = cypress_write, .write_room = cypress_write_room, .init_termios = cypress_earthmate_init_termios, .set_termios = cypress_set_termios, .tiocmget = cypress_tiocmget, .tiocmset = cypress_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .chars_in_buffer = cypress_chars_in_buffer, .throttle = cypress_throttle, .unthrottle = cypress_unthrottle, .read_int_callback = cypress_read_int_callback, .write_int_callback = cypress_write_int_callback, }; static struct usb_serial_driver cypress_hidcom_device = { .driver = { .name = "cyphidcom", }, .description = "HID->COM RS232 Adapter", .id_table = id_table_cyphidcomrs232, .num_ports = 1, .port_probe = cypress_hidcom_port_probe, .port_remove = cypress_port_remove, .open = cypress_open, .close = cypress_close, .dtr_rts = cypress_dtr_rts, .write = cypress_write, .write_room = cypress_write_room, .set_termios = cypress_set_termios, .tiocmget = cypress_tiocmget, .tiocmset = cypress_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .chars_in_buffer = cypress_chars_in_buffer, .throttle = cypress_throttle, .unthrottle = cypress_unthrottle, .read_int_callback = cypress_read_int_callback, .write_int_callback = cypress_write_int_callback, }; static struct usb_serial_driver cypress_ca42v2_device = { .driver = { .name = "nokiaca42v2", }, .description = "Nokia CA-42 V2 Adapter", .id_table = id_table_nokiaca42v2, .num_ports = 1, .port_probe = cypress_ca42v2_port_probe, .port_remove = cypress_port_remove, .open = cypress_open, .close = cypress_close, .dtr_rts = cypress_dtr_rts, .write = cypress_write, .write_room = cypress_write_room, .set_termios = cypress_set_termios, .tiocmget = cypress_tiocmget, .tiocmset = cypress_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .chars_in_buffer = cypress_chars_in_buffer, .throttle = cypress_throttle, .unthrottle = cypress_unthrottle, .read_int_callback = cypress_read_int_callback, .write_int_callback = cypress_write_int_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &cypress_earthmate_device, &cypress_hidcom_device, 
&cypress_ca42v2_device, NULL }; /***************************************************************************** * Cypress serial helper functions *****************************************************************************/ /* FRWD Dongle hidcom needs to skip reset and speed checks */ static inline bool is_frwd(struct usb_device *dev) { return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) && (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD)); } static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) { struct cypress_private *priv; priv = usb_get_serial_port_data(port); if (unstable_bauds) return new_rate; /* FRWD Dongle uses 115200 bps */ if (is_frwd(port->serial->dev)) return new_rate; /* * The general purpose firmware for the Cypress M8 allows for * a maximum speed of 57600bps (I have no idea whether DeLorme * chose to use the general purpose firmware or not), if you * need to modify this speed setting for your own project * please add your own chiptype and modify the code likewise. * The Cypress HID->COM device will work successfully up to * 115200bps (but the actual throughput is around 3kBps). */ if (port->serial->dev->speed == USB_SPEED_LOW) { /* * Mike Isely <isely@pobox.com> 2-Feb-2008: The * Cypress app note that describes this mechanism * states that the low-speed part can't handle more * than 800 bytes/sec, in which case 4800 baud is the * safest speed for a part like that. */ if (new_rate > 4800) { dev_dbg(&port->dev, "%s - failed setting baud rate, device incapable speed %d\n", __func__, new_rate); return -1; } } switch (priv->chiptype) { case CT_EARTHMATE: if (new_rate <= 600) { /* 300 and 600 baud rates are supported under * the generic firmware, but are not used with * NMEA and SiRF protocols */ dev_dbg(&port->dev, "%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS\n", __func__, new_rate); return -1; } break; default: break; } return new_rate; } /* This function can either set or retrieve the current serial line settings */ static int cypress_serial_control(struct tty_struct *tty, struct usb_serial_port *port, speed_t baud_rate, int data_bits, int stop_bits, int parity_enable, int parity_type, int reset, int cypress_request_type) { int new_baudrate = 0, retval = 0, tries = 0; struct cypress_private *priv; struct device *dev = &port->dev; u8 *feature_buffer; const unsigned int feature_len = 5; unsigned long flags; priv = usb_get_serial_port_data(port); if (!priv->comm_is_ok) return -ENODEV; feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL); if (!feature_buffer) return -ENOMEM; switch (cypress_request_type) { case CYPRESS_SET_CONFIG: /* 0 means 'Hang up' so doesn't change the true bit rate */ new_baudrate = priv->baud_rate; if (baud_rate && baud_rate != priv->baud_rate) { dev_dbg(dev, "%s - baud rate is changing\n", __func__); retval = analyze_baud_rate(port, baud_rate); if (retval >= 0) { new_baudrate = retval; dev_dbg(dev, "%s - New baud rate set to %d\n", __func__, new_baudrate); } } dev_dbg(dev, "%s - baud rate is being sent as %d\n", __func__, new_baudrate); /* fill the feature_buffer with new configuration */ put_unaligned_le32(new_baudrate, feature_buffer); feature_buffer[4] |= data_bits - 5; /* assign data bits in 2 bit space ( max 3 ) */ /* 1 bit gap */ feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */ feature_buffer[4] |= (parity_enable << 4); /* assign parity flag in 1 bit space */ feature_buffer[4] |= (parity_type << 5); /* assign parity type in 1 bit 
space */ /* 1 bit gap */ feature_buffer[4] |= (reset << 7); /* assign reset at end of byte, 1 bit space */ dev_dbg(dev, "%s - device is being sent this feature report:\n", __func__); dev_dbg(dev, "%s - %02X - %02X - %02X - %02X - %02X\n", __func__, feature_buffer[0], feature_buffer[1], feature_buffer[2], feature_buffer[3], feature_buffer[4]); do { retval = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 0x0300, 0, feature_buffer, feature_len, 500); if (tries++ >= 3) break; } while (retval != feature_len && retval != -ENODEV); if (retval != feature_len) { dev_err(dev, "%s - failed sending serial line settings - %d\n", __func__, retval); cypress_set_dead(port); } else { spin_lock_irqsave(&priv->lock, flags); priv->baud_rate = new_baudrate; priv->current_config = feature_buffer[4]; spin_unlock_irqrestore(&priv->lock, flags); /* If we asked for a speed change encode it */ if (baud_rate) tty_encode_baud_rate(tty, new_baudrate, new_baudrate); } break; case CYPRESS_GET_CONFIG: if (priv->get_cfg_unsafe) { /* Not implemented for this device, and if we try to do it we're likely to crash the hardware. */ retval = -ENOTTY; goto out; } dev_dbg(dev, "%s - retrieving serial line settings\n", __func__); do { retval = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), HID_REQ_GET_REPORT, USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 0x0300, 0, feature_buffer, feature_len, 500); if (tries++ >= 3) break; } while (retval != feature_len && retval != -ENODEV); if (retval != feature_len) { dev_err(dev, "%s - failed to retrieve serial line settings - %d\n", __func__, retval); cypress_set_dead(port); goto out; } else { spin_lock_irqsave(&priv->lock, flags); /* store the config in one byte, and later use bit masks to check values */ priv->current_config = feature_buffer[4]; priv->baud_rate = get_unaligned_le32(feature_buffer); spin_unlock_irqrestore(&priv->lock, flags); } } spin_lock_irqsave(&priv->lock, flags); ++priv->cmd_count; spin_unlock_irqrestore(&priv->lock, flags); out: kfree(feature_buffer); return retval; } /* cypress_serial_control */ static void cypress_set_dead(struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (!priv->comm_is_ok) { spin_unlock_irqrestore(&priv->lock, flags); return; } priv->comm_is_ok = 0; spin_unlock_irqrestore(&priv->lock, flags); dev_err(&port->dev, "cypress_m8 suspending failing port %d - " "interval might be too short\n", port->port_number); } /***************************************************************************** * Cypress serial driver functions *****************************************************************************/ static int cypress_generic_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct cypress_private *priv; if (!port->interrupt_out_urb || !port->interrupt_in_urb) { dev_err(&port->dev, "required endpoint is missing\n"); return -ENODEV; } priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); if (!priv) return -ENOMEM; priv->comm_is_ok = !0; spin_lock_init(&priv->lock); if (kfifo_alloc(&priv->write_fifo, CYPRESS_BUF_SIZE, GFP_KERNEL)) { kfree(priv); return -ENOMEM; } /* Skip reset for FRWD device. It is a workaound: device hangs if it receives SET_CONFIGURE in Configured state. 
*/ if (!is_frwd(serial->dev)) usb_reset_configuration(serial->dev); priv->cmd_ctrl = 0; priv->line_control = 0; priv->rx_flags = 0; /* Default packet format setting is determined by packet size. Anything with a size larger then 9 must have a separate count field since the 3 bit count field is otherwise too small. Otherwise we can use the slightly more compact format. This is in accordance with the cypress_m8 serial converter app note. */ if (port->interrupt_out_size > 9) priv->pkt_fmt = packet_format_1; else priv->pkt_fmt = packet_format_2; if (interval > 0) { priv->write_urb_interval = interval; priv->read_urb_interval = interval; dev_dbg(&port->dev, "%s - read & write intervals forced to %d\n", __func__, interval); } else { priv->write_urb_interval = port->interrupt_out_urb->interval; priv->read_urb_interval = port->interrupt_in_urb->interval; dev_dbg(&port->dev, "%s - intervals: read=%d write=%d\n", __func__, priv->read_urb_interval, priv->write_urb_interval); } usb_set_serial_port_data(port, priv); port->port.drain_delay = 256; return 0; } static int cypress_earthmate_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct cypress_private *priv; int ret; ret = cypress_generic_port_probe(port); if (ret) { dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); return ret; } priv = usb_get_serial_port_data(port); priv->chiptype = CT_EARTHMATE; /* All Earthmate devices use the separated-count packet format! Idiotic. */ priv->pkt_fmt = packet_format_1; if (serial->dev->descriptor.idProduct != cpu_to_le16(PRODUCT_ID_EARTHMATEUSB)) { /* The old original USB Earthmate seemed able to handle GET_CONFIG requests; everything they've produced since that time crashes if this command is attempted :-( */ dev_dbg(&port->dev, "%s - Marking this device as unsafe for GET_CONFIG commands\n", __func__); priv->get_cfg_unsafe = !0; } return 0; } static int cypress_hidcom_port_probe(struct usb_serial_port *port) { struct cypress_private *priv; int ret; ret = cypress_generic_port_probe(port); if (ret) { dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); return ret; } priv = usb_get_serial_port_data(port); priv->chiptype = CT_CYPHIDCOM; return 0; } static int cypress_ca42v2_port_probe(struct usb_serial_port *port) { struct cypress_private *priv; int ret; ret = cypress_generic_port_probe(port); if (ret) { dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); return ret; } priv = usb_get_serial_port_data(port); priv->chiptype = CT_CA42V2; return 0; } static void cypress_port_remove(struct usb_serial_port *port) { struct cypress_private *priv; priv = usb_get_serial_port_data(port); kfifo_free(&priv->write_fifo); kfree(priv); } static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; unsigned long flags; int result = 0; if (!priv->comm_is_ok) return -EIO; /* clear halts before open */ usb_clear_halt(serial->dev, 0x81); usb_clear_halt(serial->dev, 0x02); spin_lock_irqsave(&priv->lock, flags); /* reset read/write statistics */ priv->bytes_in = 0; priv->bytes_out = 0; priv->cmd_count = 0; priv->rx_flags = 0; spin_unlock_irqrestore(&priv->lock, flags); /* Set termios */ cypress_send(port); if (tty) cypress_set_termios(tty, port, NULL); /* setup the port and start reading from the device */ usb_fill_int_urb(port->interrupt_in_urb, serial->dev, usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), 
port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, cypress_read_int_callback, port, priv->read_urb_interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting read urb, error %d\n", __func__, result); cypress_set_dead(port); } return result; } /* cypress_open */ static void cypress_dtr_rts(struct usb_serial_port *port, int on) { struct cypress_private *priv = usb_get_serial_port_data(port); /* drop dtr and rts */ spin_lock_irq(&priv->lock); if (on == 0) priv->line_control = 0; else priv->line_control = CONTROL_DTR | CONTROL_RTS; priv->cmd_ctrl = 1; spin_unlock_irq(&priv->lock); cypress_write(NULL, port, NULL, 0); } static void cypress_close(struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); kfifo_reset_out(&priv->write_fifo); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&port->dev, "%s - stopping urbs\n", __func__); usb_kill_urb(port->interrupt_in_urb); usb_kill_urb(port->interrupt_out_urb); if (stats) dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n", priv->bytes_in, priv->bytes_out, priv->cmd_count); } /* cypress_close */ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct cypress_private *priv = usb_get_serial_port_data(port); dev_dbg(&port->dev, "%s - %d bytes\n", __func__, count); /* line control commands, which need to be executed immediately, are not put into the buffer for obvious reasons. */ if (priv->cmd_ctrl) { count = 0; goto finish; } if (!count) return count; count = kfifo_in_locked(&priv->write_fifo, buf, count, &priv->lock); finish: cypress_send(port); return count; } /* cypress_write */ static void cypress_send(struct usb_serial_port *port) { int count = 0, result, offset, actual_size; struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; unsigned long flags; if (!priv->comm_is_ok) return; dev_dbg(dev, "%s - interrupt out size is %d\n", __func__, port->interrupt_out_size); spin_lock_irqsave(&priv->lock, flags); if (priv->write_urb_in_use) { dev_dbg(dev, "%s - can't write, urb in use\n", __func__); spin_unlock_irqrestore(&priv->lock, flags); return; } spin_unlock_irqrestore(&priv->lock, flags); /* clear buffer */ memset(port->interrupt_out_urb->transfer_buffer, 0, port->interrupt_out_size); spin_lock_irqsave(&priv->lock, flags); switch (priv->pkt_fmt) { default: case packet_format_1: /* this is for the CY7C64013... */ offset = 2; port->interrupt_out_buffer[0] = priv->line_control; break; case packet_format_2: /* this is for the CY7C63743... 
*/ offset = 1; port->interrupt_out_buffer[0] = priv->line_control; break; } if (priv->line_control & CONTROL_RESET) priv->line_control &= ~CONTROL_RESET; if (priv->cmd_ctrl) { priv->cmd_count++; dev_dbg(dev, "%s - line control command being issued\n", __func__); spin_unlock_irqrestore(&priv->lock, flags); goto send; } else spin_unlock_irqrestore(&priv->lock, flags); count = kfifo_out_locked(&priv->write_fifo, &port->interrupt_out_buffer[offset], port->interrupt_out_size - offset, &priv->lock); if (count == 0) return; switch (priv->pkt_fmt) { default: case packet_format_1: port->interrupt_out_buffer[1] = count; break; case packet_format_2: port->interrupt_out_buffer[0] |= count; } dev_dbg(dev, "%s - count is %d\n", __func__, count); send: spin_lock_irqsave(&priv->lock, flags); priv->write_urb_in_use = 1; spin_unlock_irqrestore(&priv->lock, flags); if (priv->cmd_ctrl) actual_size = 1; else actual_size = count + (priv->pkt_fmt == packet_format_1 ? 2 : 1); usb_serial_debug_data(dev, __func__, port->interrupt_out_size, port->interrupt_out_urb->transfer_buffer); usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev, usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress), port->interrupt_out_buffer, actual_size, cypress_write_int_callback, port, priv->write_urb_interval); result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC); if (result) { dev_err_console(port, "%s - failed submitting write urb, error %d\n", __func__, result); priv->write_urb_in_use = 0; cypress_set_dead(port); } spin_lock_irqsave(&priv->lock, flags); if (priv->cmd_ctrl) priv->cmd_ctrl = 0; /* do not count the line control and size bytes */ priv->bytes_out += count; spin_unlock_irqrestore(&priv->lock, flags); usb_serial_port_softint(port); } /* cypress_send */ /* returns how much space is available in the soft buffer */ static unsigned int cypress_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); unsigned int room; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); room = kfifo_avail(&priv->write_fifo); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, room); return room; } static int cypress_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); __u8 status, control; unsigned int result = 0; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control = priv->line_control; status = priv->current_status; spin_unlock_irqrestore(&priv->lock, flags); result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) | ((control & CONTROL_RTS) ? TIOCM_RTS : 0) | ((status & UART_CTS) ? TIOCM_CTS : 0) | ((status & UART_DSR) ? TIOCM_DSR : 0) | ((status & UART_RI) ? TIOCM_RI : 0) | ((status & UART_CD) ? 
TIOCM_CD : 0); dev_dbg(&port->dev, "%s - result = %x\n", __func__, result); return result; } static int cypress_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (set & TIOCM_RTS) priv->line_control |= CONTROL_RTS; if (set & TIOCM_DTR) priv->line_control |= CONTROL_DTR; if (clear & TIOCM_RTS) priv->line_control &= ~CONTROL_RTS; if (clear & TIOCM_DTR) priv->line_control &= ~CONTROL_DTR; priv->cmd_ctrl = 1; spin_unlock_irqrestore(&priv->lock, flags); return cypress_write(tty, port, NULL, 0); } static void cypress_earthmate_init_termios(struct tty_struct *tty) { tty_encode_baud_rate(tty, 4800, 4800); } static void cypress_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; int data_bits, stop_bits, parity_type, parity_enable; unsigned int cflag; unsigned long flags; __u8 oldlines; int linechange = 0; /* Unsupported features need clearing */ tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS); cflag = tty->termios.c_cflag; /* set number of data bits, parity, stop bits */ /* when parity is disabled the parity type bit is ignored */ /* 1 means 2 stop bits, 0 means 1 stop bit */ stop_bits = cflag & CSTOPB ? 1 : 0; if (cflag & PARENB) { parity_enable = 1; /* 1 means odd parity, 0 means even parity */ parity_type = cflag & PARODD ? 1 : 0; } else parity_enable = parity_type = 0; data_bits = tty_get_char_size(cflag); spin_lock_irqsave(&priv->lock, flags); oldlines = priv->line_control; if ((cflag & CBAUD) == B0) { /* drop dtr and rts */ dev_dbg(dev, "%s - dropping the lines, baud rate 0bps\n", __func__); priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS); } else priv->line_control = (CONTROL_DTR | CONTROL_RTS); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(dev, "%s - sending %d stop_bits, %d parity_enable, %d parity_type, %d data_bits (+5)\n", __func__, stop_bits, parity_enable, parity_type, data_bits); cypress_serial_control(tty, port, tty_get_baud_rate(tty), data_bits, stop_bits, parity_enable, parity_type, 0, CYPRESS_SET_CONFIG); /* we perform a CYPRESS_GET_CONFIG so that the current settings are * filled into the private structure this should confirm that all is * working if it returns what we just set */ cypress_serial_control(tty, port, 0, 0, 0, 0, 0, 0, CYPRESS_GET_CONFIG); /* Here we can define custom tty settings for devices; the main tty * termios flag base comes from empeg.c */ spin_lock_irqsave(&priv->lock, flags); if (priv->chiptype == CT_EARTHMATE && priv->baud_rate == 4800) { dev_dbg(dev, "Using custom termios settings for a baud rate of 4800bps.\n"); /* define custom termios settings for NMEA protocol */ tty->termios.c_iflag /* input modes - */ &= ~(IGNBRK /* disable ignore break */ | BRKINT /* disable break causes interrupt */ | PARMRK /* disable mark parity errors */ | ISTRIP /* disable clear high bit of input char */ | INLCR /* disable translate NL to CR */ | IGNCR /* disable ignore CR */ | ICRNL /* disable translate CR to NL */ | IXON); /* disable enable XON/XOFF flow control */ tty->termios.c_oflag /* output modes */ &= ~OPOST; /* disable postprocess output char */ tty->termios.c_lflag /* line discipline modes */ &= ~(ECHO /* disable echo input characters */ | ECHONL /* disable echo new line */ | ICANON /* disable erase, kill, werase, and rprnt 
special characters */ | ISIG /* disable interrupt, quit, and suspend special characters */ | IEXTEN); /* disable non-POSIX special characters */ } /* CT_CYPHIDCOM: Application should handle this for device */ linechange = (priv->line_control != oldlines); spin_unlock_irqrestore(&priv->lock, flags); /* if necessary, set lines */ if (linechange) { priv->cmd_ctrl = 1; cypress_write(tty, port, NULL, 0); } } /* cypress_set_termios */ /* returns amount of data still left in soft buffer */ static unsigned int cypress_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); unsigned int chars; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); chars = kfifo_len(&priv->write_fifo); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars); return chars; } static void cypress_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); priv->rx_flags = THROTTLED; spin_unlock_irq(&priv->lock); } static void cypress_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); int actually_throttled, result; spin_lock_irq(&priv->lock); actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED; priv->rx_flags = 0; spin_unlock_irq(&priv->lock); if (!priv->comm_is_ok) return; if (actually_throttled) { result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting read urb, " "error %d\n", __func__, result); cypress_set_dead(port); } } } static void cypress_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &urb->dev->dev; struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; unsigned long flags; char tty_flag = TTY_NORMAL; int bytes = 0; int result; int i = 0; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* precursor to disconnect so just go away */ return; case -EPIPE: /* Can't call usb_clear_halt while in_interrupt */ fallthrough; default: /* something ugly is going on... */ dev_err(dev, "%s - unexpected nonzero read status received: %d\n", __func__, status); cypress_set_dead(port); return; } spin_lock_irqsave(&priv->lock, flags); if (priv->rx_flags & THROTTLED) { dev_dbg(dev, "%s - now throttling\n", __func__); priv->rx_flags |= ACTUALLY_THROTTLED; spin_unlock_irqrestore(&priv->lock, flags); return; } spin_unlock_irqrestore(&priv->lock, flags); tty = tty_port_tty_get(&port->port); if (!tty) { dev_dbg(dev, "%s - bad tty pointer - exiting\n", __func__); return; } spin_lock_irqsave(&priv->lock, flags); result = urb->actual_length; switch (priv->pkt_fmt) { default: case packet_format_1: /* This is for the CY7C64013... */ priv->current_status = data[0] & 0xF8; bytes = data[1] + 2; i = 2; break; case packet_format_2: /* This is for the CY7C63743... 
*/ priv->current_status = data[0] & 0xF8; bytes = (data[0] & 0x07) + 1; i = 1; break; } spin_unlock_irqrestore(&priv->lock, flags); if (result < bytes) { dev_dbg(dev, "%s - wrong packet size - received %d bytes but packet said %d bytes\n", __func__, result, bytes); goto continue_read; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); spin_lock_irqsave(&priv->lock, flags); /* check to see if status has changed */ if (priv->current_status != priv->prev_status) { u8 delta = priv->current_status ^ priv->prev_status; if (delta & UART_MSR_MASK) { if (delta & UART_CTS) port->icount.cts++; if (delta & UART_DSR) port->icount.dsr++; if (delta & UART_RI) port->icount.rng++; if (delta & UART_CD) port->icount.dcd++; wake_up_interruptible(&port->port.delta_msr_wait); } priv->prev_status = priv->current_status; } spin_unlock_irqrestore(&priv->lock, flags); /* hangup, as defined in acm.c... this might be a bad place for it * though */ if (tty && !C_CLOCAL(tty) && !(priv->current_status & UART_CD)) { dev_dbg(dev, "%s - calling hangup\n", __func__); tty_hangup(tty); goto continue_read; } /* There is one error bit... I'm assuming it is a parity error * indicator as the generic firmware will set this bit to 1 if a * parity error occurs. * I can not find reference to any other error events. */ spin_lock_irqsave(&priv->lock, flags); if (priv->current_status & CYP_ERROR) { spin_unlock_irqrestore(&priv->lock, flags); tty_flag = TTY_PARITY; dev_dbg(dev, "%s - Parity Error detected\n", __func__); } else spin_unlock_irqrestore(&priv->lock, flags); /* process read if there is data other than line status */ if (bytes > i) { tty_insert_flip_string_fixed_flag(&port->port, data + i, tty_flag, bytes - i); tty_flip_buffer_push(&port->port); } spin_lock_irqsave(&priv->lock, flags); /* control and status byte(s) are also counted */ priv->bytes_in += bytes; spin_unlock_irqrestore(&priv->lock, flags); continue_read: tty_kref_put(tty); /* Continue trying to always read */ if (priv->comm_is_ok) { usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev, usb_rcvintpipe(port->serial->dev, port->interrupt_in_endpointAddress), port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, cypress_read_int_callback, port, priv->read_urb_interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (result && result != -EPERM) { dev_err(dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); cypress_set_dead(port); } } } /* cypress_read_int_callback */ static void cypress_write_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &urb->dev->dev; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); priv->write_urb_in_use = 0; return; case -EPIPE: /* Cannot call usb_clear_halt while in_interrupt */ fallthrough; default: dev_err(dev, "%s - unexpected nonzero write status received: %d\n", __func__, status); cypress_set_dead(port); break; } priv->write_urb_in_use = 0; /* send any buffered data */ cypress_send(port); } module_usb_serial_driver(serial_drivers, id_table_combined); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(stats, bool, 0644); MODULE_PARM_DESC(stats, "Enable statistics or not"); module_param(interval, int, 
0644); MODULE_PARM_DESC(interval, "Overrides interrupt interval"); module_param(unstable_bauds, bool, 0644); MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
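/*
 * Editor's illustration (not taken from the driver above): for
 * packet_format_2 the first byte of each interrupt transfer carries the line
 * status in bits 7..3 and the payload count in bits 2..0, which is why
 * cypress_read_int_callback() masks it with 0xF8 and 0x07. A small standalone
 * sketch of that unpacking, with a made-up header byte value:
 */
#include <stdio.h>

int main(void)
{
	unsigned char header = 0x23;		/* example received header byte */
	unsigned char status = header & 0xF8;	/* same status mask as the driver */
	unsigned char count  = header & 0x07;	/* 3-bit payload byte count field */

	printf("status bits: 0x%02x, payload count: %u\n", status, count);
	return 0;
}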
// SPDX-License-Identifier: GPL-2.0+ /* * Berkshire USB-PC Watchdog Card Driver * * (c) Copyright 2004-2007 Wim Van Sebroeck <wim@iguana.be>. * * Based on source code of the following authors: * Ken Hollis <kenji@bitgate.com>, * Alan Cox <alan@lxorguk.ukuu.org.uk>, * Matt Domsch <Matt_Domsch@dell.com>, * Rob Radez <rob@osinvestor.com>, * Greg Kroah-Hartman <greg@kroah.com> * * Neither Wim Van Sebroeck nor Iguana vzw.
admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * * Thanks also to Simon Machell at Berkshire Products Inc. for * providing the test hardware. More info is available at * http://www.berkprod.com/ or http://www.pcwatchdog.com/ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/delay.h> /* For mdelay function */ #include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/fs.h> /* For file operations */ #include <linux/usb.h> /* For USB functions */ #include <linux/slab.h> /* For kmalloc, ... */ #include <linux/mutex.h> /* For mutex locking */ #include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ /* Module and Version Information */ #define DRIVER_VERSION "1.02" #define DRIVER_AUTHOR "Wim Van Sebroeck <wim@iguana.be>" #define DRIVER_DESC "Berkshire USB-PC Watchdog driver" #define DRIVER_NAME "pcwd_usb" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define WATCHDOG_HEARTBEAT 0 /* default heartbeat = delay-time from dip-switches */ static int heartbeat = WATCHDOG_HEARTBEAT; module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. " "(0<heartbeat<65536 or 0=delay-time from dip-switches, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* The vendor and product id's for the USB-PC Watchdog card */ #define USB_PCWD_VENDOR_ID 0x0c98 #define USB_PCWD_PRODUCT_ID 0x1140 /* table of devices that work with this driver */ static const struct usb_device_id usb_pcwd_table[] = { { USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_pcwd_table); /* according to documentation max. 
time to process a command for the USB * watchdog card is 100 or 200 ms, so we give it 250 ms to do it's job */ #define USB_COMMAND_TIMEOUT 250 /* Watchdog's internal commands */ #define CMD_READ_TEMP 0x02 /* Read Temperature; Re-trigger Watchdog */ #define CMD_TRIGGER CMD_READ_TEMP #define CMD_GET_STATUS 0x04 /* Get Status Information */ #define CMD_GET_FIRMWARE_VERSION 0x08 /* Get Firmware Version */ #define CMD_GET_DIP_SWITCH_SETTINGS 0x0c /* Get Dip Switch Settings */ #define CMD_READ_WATCHDOG_TIMEOUT 0x18 /* Read Current Watchdog Time */ #define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 /* Write Current WatchdogTime */ #define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */ #define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG /* Watchdog's Dip Switch heartbeat values */ static const int heartbeat_tbl[] = { 5, /* OFF-OFF-OFF = 5 Sec */ 10, /* OFF-OFF-ON = 10 Sec */ 30, /* OFF-ON-OFF = 30 Sec */ 60, /* OFF-ON-ON = 1 Min */ 300, /* ON-OFF-OFF = 5 Min */ 600, /* ON-OFF-ON = 10 Min */ 1800, /* ON-ON-OFF = 30 Min */ 3600, /* ON-ON-ON = 1 hour */ }; /* We can only use 1 card due to the /dev/watchdog restriction */ static int cards_found; /* some internal variables */ static unsigned long is_active; static char expect_release; /* Structure to hold all of our device specific stuff */ struct usb_pcwd_private { /* save off the usb device pointer */ struct usb_device *udev; /* the interface for this device */ struct usb_interface *interface; /* the interface number used for cmd's */ unsigned int interface_number; /* the buffer to intr data */ unsigned char *intr_buffer; /* the dma address for the intr buffer */ dma_addr_t intr_dma; /* the size of the intr buffer */ size_t intr_size; /* the urb used for the intr pipe */ struct urb *intr_urb; /* The command that is reported back */ unsigned char cmd_command; /* The data MSB that is reported back */ unsigned char cmd_data_msb; /* The data LSB that is reported back */ unsigned char cmd_data_lsb; /* true if we received a report after a command */ atomic_t cmd_received; /* Wether or not the device exists */ int exists; /* locks this structure */ struct mutex mtx; }; static struct usb_pcwd_private *usb_pcwd_device; /* prevent races between open() and disconnect() */ static DEFINE_MUTEX(disconnect_mutex); /* local function prototypes */ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_device_id *id); static void usb_pcwd_disconnect(struct usb_interface *interface); /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver usb_pcwd_driver = { .name = DRIVER_NAME, .probe = usb_pcwd_probe, .disconnect = usb_pcwd_disconnect, .id_table = usb_pcwd_table, }; static void usb_pcwd_intr_done(struct urb *urb) { struct usb_pcwd_private *usb_pcwd = (struct usb_pcwd_private *)urb->context; unsigned char *data = usb_pcwd->intr_buffer; struct device *dev = &usb_pcwd->interface->dev; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d", __func__, urb->status); return; /* -EPIPE: should clear the halt */ default: /* error */ dev_dbg(dev, "%s - nonzero urb status received: %d", __func__, urb->status); goto resubmit; } dev_dbg(dev, "received following data cmd=0x%02x msb=0x%02x lsb=0x%02x", data[0], data[1], data[2]); usb_pcwd->cmd_command = data[0]; usb_pcwd->cmd_data_msb = data[1]; usb_pcwd->cmd_data_lsb = data[2]; /* notify anyone waiting 
that the cmd has finished */ atomic_set(&usb_pcwd->cmd_received, 1); resubmit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) pr_err("can't resubmit intr, usb_submit_urb failed with result %d\n", retval); } static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, unsigned char cmd, unsigned char *msb, unsigned char *lsb) { int got_response, count; unsigned char *buf; /* We will not send any commands if the USB PCWD device does * not exist */ if ((!usb_pcwd) || (!usb_pcwd->exists)) return -1; buf = kmalloc(6, GFP_KERNEL); if (buf == NULL) return 0; /* The USB PC Watchdog uses a 6 byte report format. * The board currently uses only 3 of the six bytes of the report. */ buf[0] = cmd; /* Byte 0 = CMD */ buf[1] = *msb; /* Byte 1 = Data MSB */ buf[2] = *lsb; /* Byte 2 = Data LSB */ buf[3] = buf[4] = buf[5] = 0; /* All other bytes not used */ dev_dbg(&usb_pcwd->interface->dev, "sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x", buf[0], buf[1], buf[2]); atomic_set(&usb_pcwd->cmd_received, 0); if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0), HID_REQ_SET_REPORT, HID_DT_REPORT, 0x0200, usb_pcwd->interface_number, buf, 6, USB_COMMAND_TIMEOUT) != 6) { dev_dbg(&usb_pcwd->interface->dev, "usb_pcwd_send_command: error in usb_control_msg for cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb); } /* wait till the usb card processed the command, * with a max. timeout of USB_COMMAND_TIMEOUT */ got_response = 0; for (count = 0; (count < USB_COMMAND_TIMEOUT) && (!got_response); count++) { mdelay(1); if (atomic_read(&usb_pcwd->cmd_received)) got_response = 1; } if ((got_response) && (cmd == usb_pcwd->cmd_command)) { /* read back response */ *msb = usb_pcwd->cmd_data_msb; *lsb = usb_pcwd->cmd_data_lsb; } kfree(buf); return got_response; } static int usb_pcwd_start(struct usb_pcwd_private *usb_pcwd) { unsigned char msb = 0x00; unsigned char lsb = 0x00; int retval; /* Enable Watchdog */ retval = usb_pcwd_send_command(usb_pcwd, CMD_ENABLE_WATCHDOG, &msb, &lsb); if ((retval == 0) || (lsb == 0)) { pr_err("Card did not acknowledge enable attempt\n"); return -1; } return 0; } static int usb_pcwd_stop(struct usb_pcwd_private *usb_pcwd) { unsigned char msb = 0xA5; unsigned char lsb = 0xC3; int retval; /* Disable Watchdog */ retval = usb_pcwd_send_command(usb_pcwd, CMD_DISABLE_WATCHDOG, &msb, &lsb); if ((retval == 0) || (lsb != 0)) { pr_err("Card did not acknowledge disable attempt\n"); return -1; } return 0; } static int usb_pcwd_keepalive(struct usb_pcwd_private *usb_pcwd) { unsigned char dummy; /* Re-trigger Watchdog */ usb_pcwd_send_command(usb_pcwd, CMD_TRIGGER, &dummy, &dummy); return 0; } static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t) { unsigned char msb = t / 256; unsigned char lsb = t % 256; if ((t < 0x0001) || (t > 0xFFFF)) return -EINVAL; /* Write new heartbeat to watchdog */ usb_pcwd_send_command(usb_pcwd, CMD_WRITE_WATCHDOG_TIMEOUT, &msb, &lsb); heartbeat = t; return 0; } static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, int *temperature) { unsigned char msb = 0x00; unsigned char lsb = 0x00; usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb); /* * Convert celsius to fahrenheit, since this was * the decided 'standard' for this return value. 
*/ *temperature = (lsb * 9 / 5) + 32; return 0; } static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd, int *time_left) { unsigned char msb = 0x00; unsigned char lsb = 0x00; /* Read the time that's left before rebooting */ /* Note: if the board is not yet armed then we will read 0xFFFF */ usb_pcwd_send_command(usb_pcwd, CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb); *time_left = (msb << 8) + lsb; return 0; } /* * /dev/watchdog handling */ static ssize_t usb_pcwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character * five months ago... */ expect_release = 0; /* scan to see whether or not we got the * magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_release = 42; } } /* someone wrote to us, we should reload the timer */ usb_pcwd_keepalive(usb_pcwd_device); } return len; } static long usb_pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = DRIVER_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_GETTEMP: { int temperature; if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature)) return -EFAULT; return put_user(temperature, p); } case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { usb_pcwd_stop(usb_pcwd_device); retval = 0; } if (new_options & WDIOS_ENABLECARD) { usb_pcwd_start(usb_pcwd_device); retval = 0; } return retval; } case WDIOC_KEEPALIVE: usb_pcwd_keepalive(usb_pcwd_device); return 0; case WDIOC_SETTIMEOUT: { int new_heartbeat; if (get_user(new_heartbeat, p)) return -EFAULT; if (usb_pcwd_set_heartbeat(usb_pcwd_device, new_heartbeat)) return -EINVAL; usb_pcwd_keepalive(usb_pcwd_device); } fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); case WDIOC_GETTIMELEFT: { int time_left; if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left)) return -EFAULT; return put_user(time_left, p); } default: return -ENOTTY; } } static int usb_pcwd_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &is_active)) return -EBUSY; /* Activate */ usb_pcwd_start(usb_pcwd_device); usb_pcwd_keepalive(usb_pcwd_device); return stream_open(inode, file); } static int usb_pcwd_release(struct inode *inode, struct file *file) { /* * Shut off the timer. 
*/ if (expect_release == 42) { usb_pcwd_stop(usb_pcwd_device); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); usb_pcwd_keepalive(usb_pcwd_device); } expect_release = 0; clear_bit(0, &is_active); return 0; } /* * /dev/temperature handling */ static ssize_t usb_pcwd_temperature_read(struct file *file, char __user *data, size_t len, loff_t *ppos) { int temperature; if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature)) return -EFAULT; if (copy_to_user(data, &temperature, 1)) return -EFAULT; return 1; } static int usb_pcwd_temperature_open(struct inode *inode, struct file *file) { return stream_open(inode, file); } static int usb_pcwd_temperature_release(struct inode *inode, struct file *file) { return 0; } /* * Notify system */ static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) usb_pcwd_stop(usb_pcwd_device); /* Turn the WDT off */ return NOTIFY_DONE; } /* * Kernel Interfaces */ static const struct file_operations usb_pcwd_fops = { .owner = THIS_MODULE, .write = usb_pcwd_write, .unlocked_ioctl = usb_pcwd_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = usb_pcwd_open, .release = usb_pcwd_release, }; static struct miscdevice usb_pcwd_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &usb_pcwd_fops, }; static const struct file_operations usb_pcwd_temperature_fops = { .owner = THIS_MODULE, .read = usb_pcwd_temperature_read, .open = usb_pcwd_temperature_open, .release = usb_pcwd_temperature_release, }; static struct miscdevice usb_pcwd_temperature_miscdev = { .minor = TEMP_MINOR, .name = "temperature", .fops = &usb_pcwd_temperature_fops, }; static struct notifier_block usb_pcwd_notifier = { .notifier_call = usb_pcwd_notify_sys, }; /* * usb_pcwd_delete */ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) { usb_free_urb(usb_pcwd->intr_urb); usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size, usb_pcwd->intr_buffer, usb_pcwd->intr_dma); kfree(usb_pcwd); } /* * usb_pcwd_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. 
*/ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct usb_pcwd_private *usb_pcwd = NULL; int pipe; int retval = -ENOMEM; int got_fw_rev; unsigned char fw_rev_major, fw_rev_minor; char fw_ver_str[20]; unsigned char option_switches, dummy; cards_found++; if (cards_found > 1) { pr_err("This driver only supports 1 device\n"); return -ENODEV; } /* get the active interface descriptor */ iface_desc = interface->cur_altsetting; /* check out that we have a HID device */ if (!(iface_desc->desc.bInterfaceClass == USB_CLASS_HID)) { pr_err("The device isn't a Human Interface Device\n"); return -ENODEV; } if (iface_desc->desc.bNumEndpoints < 1) return -ENODEV; /* check out the endpoint: it has to be Interrupt & IN */ endpoint = &iface_desc->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) { /* we didn't find a Interrupt endpoint with direction IN */ pr_err("Couldn't find an INTR & IN endpoint\n"); return -ENODEV; } /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); /* allocate memory for our device and initialize it */ usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL); if (usb_pcwd == NULL) goto error; usb_pcwd_device = usb_pcwd; mutex_init(&usb_pcwd->mtx); usb_pcwd->udev = udev; usb_pcwd->interface = interface; usb_pcwd->interface_number = iface_desc->desc.bInterfaceNumber; usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8); /* set up the memory buffer's */ usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size, GFP_KERNEL, &usb_pcwd->intr_dma); if (!usb_pcwd->intr_buffer) { pr_err("Out of memory\n"); goto error; } /* allocate the urb's */ usb_pcwd->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!usb_pcwd->intr_urb) goto error; /* initialise the intr urb's */ usb_fill_int_urb(usb_pcwd->intr_urb, udev, pipe, usb_pcwd->intr_buffer, usb_pcwd->intr_size, usb_pcwd_intr_done, usb_pcwd, endpoint->bInterval); usb_pcwd->intr_urb->transfer_dma = usb_pcwd->intr_dma; usb_pcwd->intr_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* register our interrupt URB with the USB system */ if (usb_submit_urb(usb_pcwd->intr_urb, GFP_KERNEL)) { pr_err("Problem registering interrupt URB\n"); retval = -EIO; /* failure */ goto error; } /* The device exists and can be communicated with */ usb_pcwd->exists = 1; /* disable card */ usb_pcwd_stop(usb_pcwd); /* Get the Firmware Version */ got_fw_rev = usb_pcwd_send_command(usb_pcwd, CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); if (got_fw_rev) sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); else sprintf(fw_ver_str, "<card no answer>"); pr_info("Found card (Firmware: %s) with temp option\n", fw_ver_str); /* Get switch settings */ usb_pcwd_send_command(usb_pcwd, CMD_GET_DIP_SWITCH_SETTINGS, &dummy, &option_switches); pr_info("Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n", option_switches, ((option_switches & 0x10) ? "ON" : "OFF"), ((option_switches & 0x08) ? 
"ON" : "OFF")); /* If heartbeat = 0 then we use the heartbeat from the dip-switches */ if (heartbeat == 0) heartbeat = heartbeat_tbl[(option_switches & 0x07)]; /* Check that the heartbeat value is within it's range ; * if not reset to the default */ if (usb_pcwd_set_heartbeat(usb_pcwd, heartbeat)) { usb_pcwd_set_heartbeat(usb_pcwd, WATCHDOG_HEARTBEAT); pr_info("heartbeat value must be 0<heartbeat<65536, using %d\n", WATCHDOG_HEARTBEAT); } retval = register_reboot_notifier(&usb_pcwd_notifier); if (retval != 0) { pr_err("cannot register reboot notifier (err=%d)\n", retval); goto error; } retval = misc_register(&usb_pcwd_temperature_miscdev); if (retval != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", TEMP_MINOR, retval); goto err_out_unregister_reboot; } retval = misc_register(&usb_pcwd_miscdev); if (retval != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, retval); goto err_out_misc_deregister; } /* we can register the device now, as it is ready */ usb_set_intfdata(interface, usb_pcwd); pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return 0; err_out_misc_deregister: misc_deregister(&usb_pcwd_temperature_miscdev); err_out_unregister_reboot: unregister_reboot_notifier(&usb_pcwd_notifier); error: if (usb_pcwd) usb_pcwd_delete(usb_pcwd); usb_pcwd_device = NULL; return retval; } /* * usb_pcwd_disconnect * * Called by the usb core when the device is removed from the system. * * This routine guarantees that the driver will not submit any more urbs * by clearing dev->udev. */ static void usb_pcwd_disconnect(struct usb_interface *interface) { struct usb_pcwd_private *usb_pcwd; /* prevent races with open() */ mutex_lock(&disconnect_mutex); usb_pcwd = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); mutex_lock(&usb_pcwd->mtx); /* Stop the timer before we leave */ if (!nowayout) usb_pcwd_stop(usb_pcwd); /* We should now stop communicating with the USB PCWD device */ usb_pcwd->exists = 0; /* Deregister */ misc_deregister(&usb_pcwd_miscdev); misc_deregister(&usb_pcwd_temperature_miscdev); unregister_reboot_notifier(&usb_pcwd_notifier); mutex_unlock(&usb_pcwd->mtx); /* Delete the USB PCWD device */ usb_pcwd_delete(usb_pcwd); cards_found--; mutex_unlock(&disconnect_mutex); pr_info("USB PC Watchdog disconnected\n"); } module_usb_driver(usb_pcwd_driver); |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mac80211 configuration hooks for cfg80211
 *
 * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2015  Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2025 Intel Corporation
 */

#include <linux/ieee80211.h>
#include <linux/nl80211.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <linux/rcupdate.h>
#include <linux/fips.h>
#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"

static struct ieee80211_link_data *
ieee80211_link_or_deflink(struct ieee80211_sub_if_data *sdata, int link_id,
			  bool require_valid)
{
	struct ieee80211_link_data *link;

	if (link_id < 0) {
		/*
		 * For keys, if sdata is not an MLD, we might not use
		 * the return value at all (if it's not a pairwise key),
		 * so in that case (require_valid==false) don't error.
*/ if (require_valid && ieee80211_vif_is_mld(&sdata->vif)) return ERR_PTR(-EINVAL); return &sdata->deflink; } link = sdata_dereference(sdata->link[link_id], sdata); if (!link) return ERR_PTR(-ENOLINK); return link; } static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata, struct vif_params *params) { bool mu_mimo_groups = false; bool mu_mimo_follow = false; if (params->vht_mumimo_groups) { u64 membership; BUILD_BUG_ON(sizeof(membership) != WLAN_MEMBERSHIP_LEN); memcpy(sdata->vif.bss_conf.mu_group.membership, params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN); memcpy(sdata->vif.bss_conf.mu_group.position, params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN, WLAN_USER_POSITION_LEN); /* don't care about endianness - just check for 0 */ memcpy(&membership, params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN); mu_mimo_groups = membership != 0; /* Unset following if configured explicitly */ eth_broadcast_addr(sdata->u.mntr.mu_follow_addr); } if (params->vht_mumimo_follow_addr) { mu_mimo_follow = is_valid_ether_addr(params->vht_mumimo_follow_addr); ether_addr_copy(sdata->u.mntr.mu_follow_addr, params->vht_mumimo_follow_addr); /* Unset current membership until a management frame is RXed */ memset(sdata->vif.bss_conf.mu_group.membership, 0, WLAN_MEMBERSHIP_LEN); } sdata->vif.bss_conf.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow; /* Notify only after setting mu_mimo_owner */ if (sdata->vif.bss_conf.mu_mimo_owner && sdata->flags & IEEE80211_SDATA_IN_DRIVER) ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_MU_GROUPS); } static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, struct vif_params *params) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *monitor_sdata = NULL; /* check flags first */ if (params->flags && ieee80211_sdata_running(sdata)) { u32 mask = MONITOR_FLAG_ACTIVE; /* * Prohibit MONITOR_FLAG_ACTIVE to be changed * while the interface is up. * Else we would need to add a lot of cruft * to update everything: * monitor and all fif_* counters * reconfigure hardware */ if ((params->flags & mask) != (sdata->u.mntr.flags & mask)) return -EBUSY; } /* validate whether MU-MIMO can be configured */ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) && (params->vht_mumimo_groups || params->vht_mumimo_follow_addr)) return -EOPNOTSUPP; /* Also update dependent monitor_sdata if required */ if (test_bit(SDATA_STATE_RUNNING, &sdata->state) && !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) monitor_sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); /* apply all changes now - no failures allowed */ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) || ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) { /* This is copied in when the VIF is activated */ ieee80211_set_mu_mimo_follow(sdata, params); if (monitor_sdata) ieee80211_set_mu_mimo_follow(monitor_sdata, params); } if (params->flags) { if (ieee80211_sdata_running(sdata)) { ieee80211_adjust_monitor_flags(sdata, -1); sdata->u.mntr.flags = params->flags; ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); } else { /* * Because the interface is down, ieee80211_do_stop * and ieee80211_do_open take care of "everything" * mentioned in the comment above. 
*/ sdata->u.mntr.flags = params->flags; } } return 0; } static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata, struct cfg80211_mbssid_config *params, struct ieee80211_bss_conf *link_conf) { struct ieee80211_sub_if_data *tx_sdata; struct ieee80211_bss_conf *old; link_conf->bssid_index = 0; link_conf->nontransmitted = false; link_conf->ema_ap = false; link_conf->bssid_indicator = 0; if (sdata->vif.type != NL80211_IFTYPE_AP || !params->tx_wdev) return -EINVAL; old = sdata_dereference(link_conf->tx_bss_conf, sdata); if (old) return -EALREADY; tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params->tx_wdev); if (!tx_sdata) return -EINVAL; if (tx_sdata == sdata) { rcu_assign_pointer(link_conf->tx_bss_conf, link_conf); } else { struct ieee80211_bss_conf *tx_bss_conf; tx_bss_conf = sdata_dereference(tx_sdata->vif.link_conf[params->tx_link_id], sdata); if (rcu_access_pointer(tx_bss_conf->tx_bss_conf) != tx_bss_conf) return -EINVAL; rcu_assign_pointer(link_conf->tx_bss_conf, tx_bss_conf); link_conf->nontransmitted = true; link_conf->bssid_index = params->index; link_conf->bssid_indicator = tx_bss_conf->bssid_indicator; } if (params->ema) link_conf->ema_ap = true; return 0; } static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct wireless_dev *wdev; struct ieee80211_sub_if_data *sdata; int err; err = ieee80211_if_add(local, name, name_assign_type, &wdev, type, params); if (err) return ERR_PTR(err); sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (type == NL80211_IFTYPE_MONITOR) { err = ieee80211_set_mon_options(sdata, params); if (err) { ieee80211_if_remove(sdata); return NULL; } } /* Let the driver know that an interface is going to be added. * Indicate so only for interface types that will be added to the * driver. 
*/ switch (type) { case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_MONITOR: if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) || !(params->flags & MONITOR_FLAG_ACTIVE)) break; fallthrough; default: drv_prep_add_interface(local, ieee80211_vif_type_p2p(&sdata->vif)); break; } return wdev; } static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) { ieee80211_if_remove(IEEE80211_WDEV_TO_SUB_IF(wdev)); return 0; } static int ieee80211_change_iface(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret; lockdep_assert_wiphy(local->hw.wiphy); ret = ieee80211_if_change_type(sdata, type); if (ret) return ret; if (type == NL80211_IFTYPE_AP_VLAN && params->use_4addr == 0) { RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); ieee80211_check_fast_rx_iface(sdata); } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (params->use_4addr == ifmgd->use_4addr) return 0; /* FIXME: no support for 4-addr MLO yet */ if (ieee80211_vif_is_mld(&sdata->vif)) return -EOPNOTSUPP; sdata->u.mgd.use_4addr = params->use_4addr; if (!ifmgd->associated) return 0; sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid); if (sta) drv_sta_set_4addr(local, sdata, &sta->sta, params->use_4addr); if (params->use_4addr) ieee80211_send_4addr_nullfunc(local, sdata); } if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { ret = ieee80211_set_mon_options(sdata, params); if (ret) return ret; } return 0; } static int ieee80211_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); int ret; lockdep_assert_wiphy(sdata->local->hw.wiphy); ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1); if (ret < 0) return ret; return ieee80211_do_open(wdev, true); } static void ieee80211_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev)); } static void ieee80211_nan_conf_free(struct cfg80211_nan_conf *conf) { kfree(conf->cluster_id); kfree(conf->extra_nan_attrs); kfree(conf->vendor_elems); memset(conf, 0, sizeof(*conf)); } static void ieee80211_stop_nan(struct wiphy *wiphy, struct wireless_dev *wdev) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (!sdata->u.nan.started) return; drv_stop_nan(sdata->local, sdata); sdata->u.nan.started = false; ieee80211_nan_conf_free(&sdata->u.nan.conf); ieee80211_sdata_stop(sdata); ieee80211_recalc_idle(sdata->local); } static int ieee80211_nan_conf_copy(struct cfg80211_nan_conf *dst, struct cfg80211_nan_conf *src, u32 changes) { if (changes & CFG80211_NAN_CONF_CHANGED_PREF) dst->master_pref = src->master_pref; if (changes & CFG80211_NAN_CONF_CHANGED_BANDS) dst->bands = src->bands; if (changes & CFG80211_NAN_CONF_CHANGED_CONFIG) { dst->scan_period = src->scan_period; dst->scan_dwell_time = src->scan_dwell_time; dst->discovery_beacon_interval = src->discovery_beacon_interval; dst->enable_dw_notification = src->enable_dw_notification; memcpy(&dst->band_cfgs, &src->band_cfgs, sizeof(dst->band_cfgs)); kfree(dst->cluster_id); dst->cluster_id = NULL; kfree(dst->extra_nan_attrs); dst->extra_nan_attrs = NULL; dst->extra_nan_attrs_len = 0; kfree(dst->vendor_elems); dst->vendor_elems = NULL; dst->vendor_elems_len = 0; if (src->cluster_id) { dst->cluster_id = 
kmemdup(src->cluster_id, ETH_ALEN, GFP_KERNEL); if (!dst->cluster_id) goto no_mem; } if (src->extra_nan_attrs && src->extra_nan_attrs_len) { dst->extra_nan_attrs = kmemdup(src->extra_nan_attrs, src->extra_nan_attrs_len, GFP_KERNEL); if (!dst->extra_nan_attrs) goto no_mem; dst->extra_nan_attrs_len = src->extra_nan_attrs_len; } if (src->vendor_elems && src->vendor_elems_len) { dst->vendor_elems = kmemdup(src->vendor_elems, src->vendor_elems_len, GFP_KERNEL); if (!dst->vendor_elems) goto no_mem; dst->vendor_elems_len = src->vendor_elems_len; } } return 0; no_mem: ieee80211_nan_conf_free(dst); return -ENOMEM; } static int ieee80211_start_nan(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); int ret; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (sdata->u.nan.started) return -EALREADY; ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1); if (ret < 0) return ret; ret = ieee80211_do_open(wdev, true); if (ret) return ret; ret = drv_start_nan(sdata->local, sdata, conf); if (ret) { ieee80211_sdata_stop(sdata); return ret; } sdata->u.nan.started = true; ieee80211_recalc_idle(sdata->local); ret = ieee80211_nan_conf_copy(&sdata->u.nan.conf, conf, 0xFFFFFFFF); if (ret) { ieee80211_stop_nan(wiphy, wdev); return ret; } return 0; } static int ieee80211_nan_change_conf(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct cfg80211_nan_conf new_conf = {}; int ret = 0; if (sdata->vif.type != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; if (!changes) return 0; /* First make a full copy of the previous configuration and then apply * the changes. This might be a little wasteful, but it is simpler. 
*/ ret = ieee80211_nan_conf_copy(&new_conf, &sdata->u.nan.conf, 0xFFFFFFFF); if (ret < 0) return ret; ret = ieee80211_nan_conf_copy(&new_conf, conf, changes); if (ret < 0) return ret; ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes); if (ret) { ieee80211_nan_conf_free(&new_conf); } else { ieee80211_nan_conf_free(&sdata->u.nan.conf); sdata->u.nan.conf = new_conf; } return ret; } static int ieee80211_add_nan_func(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); int ret; if (sdata->vif.type != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; spin_lock_bh(&sdata->u.nan.func_lock); ret = idr_alloc(&sdata->u.nan.function_inst_ids, nan_func, 1, sdata->local->hw.max_nan_de_entries + 1, GFP_ATOMIC); spin_unlock_bh(&sdata->u.nan.func_lock); if (ret < 0) return ret; nan_func->instance_id = ret; WARN_ON(nan_func->instance_id == 0); ret = drv_add_nan_func(sdata->local, sdata, nan_func); if (ret) { spin_lock_bh(&sdata->u.nan.func_lock); idr_remove(&sdata->u.nan.function_inst_ids, nan_func->instance_id); spin_unlock_bh(&sdata->u.nan.func_lock); } return ret; } static struct cfg80211_nan_func * ieee80211_find_nan_func_by_cookie(struct ieee80211_sub_if_data *sdata, u64 cookie) { struct cfg80211_nan_func *func; int id; lockdep_assert_held(&sdata->u.nan.func_lock); idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id) { if (func->cookie == cookie) return func; } return NULL; } static void ieee80211_del_nan_func(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct cfg80211_nan_func *func; u8 instance_id = 0; if (sdata->vif.type != NL80211_IFTYPE_NAN || !ieee80211_sdata_running(sdata)) return; spin_lock_bh(&sdata->u.nan.func_lock); func = ieee80211_find_nan_func_by_cookie(sdata, cookie); if (func) instance_id = func->instance_id; spin_unlock_bh(&sdata->u.nan.func_lock); if (instance_id) drv_del_nan_func(sdata->local, sdata, instance_id); } static int ieee80211_set_noack_map(struct wiphy *wiphy, struct net_device *dev, u16 noack_map) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); sdata->noack_map = noack_map; ieee80211_check_fast_xmit_iface(sdata); return 0; } static int ieee80211_set_tx(struct ieee80211_sub_if_data *sdata, const u8 *mac_addr, u8 key_idx) { struct ieee80211_local *local = sdata->local; struct ieee80211_key *key; struct sta_info *sta; int ret = -EINVAL; if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID)) return -EINVAL; sta = sta_info_get_bss(sdata, mac_addr); if (!sta) return -EINVAL; if (sta->ptk_idx == key_idx) return 0; key = wiphy_dereference(local->hw.wiphy, sta->ptk[key_idx]); if (key && key->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) ret = ieee80211_set_tx_key(key); return ret; } static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, int link_id, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link = ieee80211_link_or_deflink(sdata, link_id, false); struct ieee80211_local *local = sdata->local; struct sta_info *sta = NULL; struct ieee80211_key *key; int err; lockdep_assert_wiphy(local->hw.wiphy); if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; if (IS_ERR(link)) return PTR_ERR(link); if (WARN_ON(pairwise && link_id >= 0)) return 
-EINVAL; if (pairwise && params->mode == NL80211_KEY_SET_TX) return ieee80211_set_tx(sdata, mac_addr, key_idx); /* reject WEP and TKIP keys if WEP failed to initialize */ switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_WEP104: if (link_id >= 0) return -EINVAL; if (WARN_ON_ONCE(fips_enabled)) return -EINVAL; break; default: break; } key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len, params->key, params->seq_len, params->seq); if (IS_ERR(key)) return PTR_ERR(key); if (pairwise) { key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; key->conf.link_id = -1; } else { key->conf.link_id = link->link_id; } if (params->mode == NL80211_KEY_NO_TX) key->conf.flags |= IEEE80211_KEY_FLAG_NO_AUTO_TX; if (mac_addr) { sta = sta_info_get_bss(sdata, mac_addr); /* * The ASSOC test makes sure the driver is ready to * receive the key. When wpa_supplicant has roamed * using FT, it attempts to set the key before * association has completed, this rejects that attempt * so it will set the key again after association. * * TODO: accept the key if we have a station entry and * add it to the device after the station. */ if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { ieee80211_key_free_unused(key); return -ENOENT; } } switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: /* Keys without a station are used for TX only */ if (sta && test_sta_flag(sta, WLAN_STA_MFP)) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; case NL80211_IFTYPE_ADHOC: /* no MFP (yet) */ break; case NL80211_IFTYPE_MESH_POINT: #ifdef CONFIG_MAC80211_MESH if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; #endif case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_OCB: /* shouldn't happen */ WARN_ON_ONCE(1); break; } err = ieee80211_key_link(key, link, sta); /* KRACK protection, shouldn't happen but just silently accept key */ if (err == -EALREADY) err = 0; return err; } static struct ieee80211_key * ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, int link_id, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct ieee80211_local *local __maybe_unused = sdata->local; struct ieee80211_link_data *link = &sdata->deflink; struct ieee80211_key *key; if (link_id >= 0) { link = sdata_dereference(sdata->link[link_id], sdata); if (!link) return NULL; } if (mac_addr) { struct sta_info *sta; struct link_sta_info *link_sta; sta = sta_info_get_bss(sdata, mac_addr); if (!sta) return NULL; if (link_id >= 0) { link_sta = rcu_dereference_check(sta->link[link_id], lockdep_is_held(&local->hw.wiphy->mtx)); if (!link_sta) return NULL; } else { link_sta = &sta->deflink; } if (pairwise && key_idx < NUM_DEFAULT_KEYS) return wiphy_dereference(local->hw.wiphy, sta->ptk[key_idx]); if (!pairwise && key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS) return wiphy_dereference(local->hw.wiphy, link_sta->gtk[key_idx]); return NULL; } if (pairwise && key_idx < NUM_DEFAULT_KEYS) return wiphy_dereference(local->hw.wiphy, sdata->keys[key_idx]); key = wiphy_dereference(local->hw.wiphy, link->gtk[key_idx]); if (key) return key; /* or maybe it was a WEP key */ if 
(key_idx < NUM_DEFAULT_KEYS) return wiphy_dereference(local->hw.wiphy, sdata->keys[key_idx]); return NULL; } static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, int link_id, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_key *key; lockdep_assert_wiphy(local->hw.wiphy); key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr); if (!key) return -ENOENT; ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); return 0; } static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, int link_id, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params *params)) { struct ieee80211_sub_if_data *sdata; u8 seq[6] = {0}; struct key_params params; struct ieee80211_key *key; u64 pn64; u32 iv32; u16 iv16; int err = -ENOENT; struct ieee80211_key_seq kseq = {}; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr); if (!key) goto out; memset(&params, 0, sizeof(params)); params.cipher = key->conf.cipher; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: pn64 = atomic64_read(&key->conf.tx_pn); iv32 = TKIP_PN_TO_IV32(pn64); iv16 = TKIP_PN_TO_IV16(pn64); if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { drv_get_key_seq(sdata->local, key, &kseq); iv32 = kseq.tkip.iv32; iv16 = kseq.tkip.iv16; } seq[0] = iv16 & 0xff; seq[1] = (iv16 >> 8) & 0xff; seq[2] = iv32 & 0xff; seq[3] = (iv32 >> 8) & 0xff; seq[4] = (iv32 >> 16) & 0xff; seq[5] = (iv32 >> 24) & 0xff; params.seq = seq; params.seq_len = 6; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_cmac)); fallthrough; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_gmac)); fallthrough; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), gcmp)); if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { drv_get_key_seq(sdata->local, key, &kseq); memcpy(seq, kseq.ccmp.pn, 6); } else { pn64 = atomic64_read(&key->conf.tx_pn); seq[0] = pn64; seq[1] = pn64 >> 8; seq[2] = pn64 >> 16; seq[3] = pn64 >> 24; seq[4] = pn64 >> 32; seq[5] = pn64 >> 40; } params.seq = seq; params.seq_len = 6; break; default: if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) break; if (WARN_ON(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) break; drv_get_key_seq(sdata->local, key, &kseq); params.seq = kseq.hw.seq; params.seq_len = kseq.hw.seq_len; break; } callback(cookie, &params); err = 0; out: rcu_read_unlock(); return err; } static int ieee80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, int link_id, u8 key_idx, bool uni, bool multi) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link = ieee80211_link_or_deflink(sdata, link_id, false); if (IS_ERR(link)) return PTR_ERR(link); ieee80211_set_default_key(link, key_idx, uni, multi); return 0; } static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, int link_id, u8 key_idx) { struct
ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link = ieee80211_link_or_deflink(sdata, link_id, true); if (IS_ERR(link)) return PTR_ERR(link); ieee80211_set_default_mgmt_key(link, key_idx); return 0; } static int ieee80211_config_default_beacon_key(struct wiphy *wiphy, struct net_device *dev, int link_id, u8 key_idx) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link = ieee80211_link_or_deflink(sdata, link_id, true); if (IS_ERR(link)) return PTR_ERR(link); ieee80211_set_default_beacon_key(link, key_idx); return 0; } void sta_set_rate_info_tx(struct sta_info *sta, const struct ieee80211_tx_rate *rate, struct rate_info *rinfo) { rinfo->flags = 0; if (rate->flags & IEEE80211_TX_RC_MCS) { rinfo->flags |= RATE_INFO_FLAGS_MCS; rinfo->mcs = rate->idx; } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; rinfo->mcs = ieee80211_rate_get_vht_mcs(rate); rinfo->nss = ieee80211_rate_get_vht_nss(rate); } else { struct ieee80211_supported_band *sband; sband = ieee80211_get_sband(sta->sdata); WARN_ON_ONCE(sband && !sband->bitrates); if (sband && sband->bitrates) rinfo->legacy = sband->bitrates[rate->idx].bitrate; } if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_40; else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_80; else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) rinfo->bw = RATE_INFO_BW_160; else rinfo->bw = RATE_INFO_BW_20; if (rate->flags & IEEE80211_TX_RC_SHORT_GI) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; } static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret = -ENOENT; lockdep_assert_wiphy(local->hw.wiphy); sta = sta_info_get_by_idx(sdata, idx); if (sta) { ret = 0; memcpy(mac, sta->sta.addr, ETH_ALEN); sta_set_sinfo(sta, sinfo, true); /* Add accumulated removed link data to sinfo data for * consistency for MLO */ if (sinfo->valid_links) sta_set_accumulated_removed_links_sinfo(sta, sinfo); } return ret; } static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, int idx, struct survey_info *survey) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); return drv_get_survey(local, idx, survey); } static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret = -ENOENT; lockdep_assert_wiphy(local->hw.wiphy); sta = sta_info_get_bss(sdata, mac); if (sta) { ret = 0; sta_set_sinfo(sta, sinfo, true); /* Add accumulated removed link data to sinfo data for * consistency for MLO */ if (sinfo->valid_links) sta_set_accumulated_removed_links_sinfo(sta, sinfo); } return ret; } static int ieee80211_set_monitor_channel(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; struct ieee80211_chan_req chanreq = { .oper = *chandef }; int ret; lockdep_assert_wiphy(local->hw.wiphy); sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) { if (cfg80211_chandef_identical(&local->monitor_chanreq.oper, &chanreq.oper)) return 0; sdata = 
wiphy_dereference(wiphy, local->monitor_sdata); if (!sdata) goto done; } if (rcu_access_pointer(sdata->deflink.conf->chanctx_conf) && cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper, &chanreq.oper)) return 0; ieee80211_link_release_channel(&sdata->deflink); ret = ieee80211_link_use_channel(&sdata->deflink, &chanreq, IEEE80211_CHANCTX_SHARED); if (ret) return ret; done: local->monitor_chanreq = chanreq; return 0; } static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, const u8 *resp, size_t resp_len, const struct ieee80211_csa_settings *csa, const struct ieee80211_color_change_settings *cca, struct ieee80211_link_data *link) { struct probe_resp *new, *old; if (!resp || !resp_len) return 1; old = sdata_dereference(link->u.ap.probe_resp, sdata); new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL); if (!new) return -ENOMEM; new->len = resp_len; memcpy(new->data, resp, resp_len); if (csa) memcpy(new->cntdwn_counter_offsets, csa->counter_offsets_presp, csa->n_counter_offsets_presp * sizeof(new->cntdwn_counter_offsets[0])); else if (cca) new->cntdwn_counter_offsets[0] = cca->counter_offset_presp; rcu_assign_pointer(link->u.ap.probe_resp, new); if (old) kfree_rcu(old, rcu_head); return 0; } static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata, struct cfg80211_fils_discovery *params, struct ieee80211_link_data *link, struct ieee80211_bss_conf *link_conf, u64 *changed) { struct fils_discovery_data *new, *old = NULL; struct ieee80211_fils_discovery *fd; if (!params->update) return 0; fd = &link_conf->fils_discovery; fd->min_interval = params->min_interval; fd->max_interval = params->max_interval; old = sdata_dereference(link->u.ap.fils_discovery, sdata); if (old) kfree_rcu(old, rcu_head); if (params->tmpl && params->tmpl_len) { new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL); if (!new) return -ENOMEM; new->len = params->tmpl_len; memcpy(new->data, params->tmpl, params->tmpl_len); rcu_assign_pointer(link->u.ap.fils_discovery, new); } else { RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL); } *changed |= BSS_CHANGED_FILS_DISCOVERY; return 0; } static int ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata, struct cfg80211_unsol_bcast_probe_resp *params, struct ieee80211_link_data *link, struct ieee80211_bss_conf *link_conf, u64 *changed) { struct unsol_bcast_probe_resp_data *new, *old = NULL; if (!params->update) return 0; link_conf->unsol_bcast_probe_resp_interval = params->interval; old = sdata_dereference(link->u.ap.unsol_bcast_probe_resp, sdata); if (old) kfree_rcu(old, rcu_head); if (params->tmpl && params->tmpl_len) { new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL); if (!new) return -ENOMEM; new->len = params->tmpl_len; memcpy(new->data, params->tmpl, params->tmpl_len); rcu_assign_pointer(link->u.ap.unsol_bcast_probe_resp, new); } else { RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL); } *changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP; return 0; } static int ieee80211_set_s1g_short_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct cfg80211_s1g_short_beacon *params) { struct s1g_short_beacon_data *new; struct s1g_short_beacon_data *old = sdata_dereference(link->u.ap.s1g_short_beacon, sdata); size_t new_len = sizeof(*new) + params->short_head_len + params->short_tail_len; if (!params->update) return 0; if (!params->short_head) return -EINVAL; new = kzalloc(new_len, GFP_KERNEL); if (!new) return -ENOMEM; /* Memory layout: | struct | head | tail | 
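 * i.e. one allocation holds the descriptor followed by the short beacon head and optional tail, so short_head/short_tail point into the same buffer.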
*/ new->short_head = (u8 *)new + sizeof(*new); new->short_head_len = params->short_head_len; memcpy(new->short_head, params->short_head, params->short_head_len); if (params->short_tail) { new->short_tail = new->short_head + params->short_head_len; new->short_tail_len = params->short_tail_len; memcpy(new->short_tail, params->short_tail, params->short_tail_len); } rcu_assign_pointer(link->u.ap.s1g_short_beacon, new); if (old) kfree_rcu(old, rcu_head); return 0; } static int ieee80211_set_ftm_responder_params( struct ieee80211_sub_if_data *sdata, const u8 *lci, size_t lci_len, const u8 *civicloc, size_t civicloc_len, struct ieee80211_bss_conf *link_conf) { struct ieee80211_ftm_responder_params *new, *old; u8 *pos; int len; if (!lci_len && !civicloc_len) return 0; old = link_conf->ftmr_params; len = lci_len + civicloc_len; new = kzalloc(sizeof(*new) + len, GFP_KERNEL); if (!new) return -ENOMEM; pos = (u8 *)(new + 1); if (lci_len) { new->lci_len = lci_len; new->lci = pos; memcpy(pos, lci, lci_len); pos += lci_len; } if (civicloc_len) { new->civicloc_len = civicloc_len; new->civicloc = pos; memcpy(pos, civicloc, civicloc_len); pos += civicloc_len; } link_conf->ftmr_params = new; kfree(old); return 0; } static int ieee80211_copy_mbssid_beacon(u8 *pos, struct cfg80211_mbssid_elems *dst, struct cfg80211_mbssid_elems *src) { int i, offset = 0; dst->cnt = src->cnt; for (i = 0; i < src->cnt; i++) { memcpy(pos + offset, src->elem[i].data, src->elem[i].len); dst->elem[i].len = src->elem[i].len; dst->elem[i].data = pos + offset; offset += dst->elem[i].len; } return offset; } static int ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst, struct cfg80211_rnr_elems *src) { int i, offset = 0; dst->cnt = src->cnt; for (i = 0; i < src->cnt; i++) { memcpy(pos + offset, src->elem[i].data, src->elem[i].len); dst->elem[i].len = src->elem[i].len; dst->elem[i].data = pos + offset; offset += dst->elem[i].len; } return offset; } static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct cfg80211_beacon_data *params, const struct ieee80211_csa_settings *csa, const struct ieee80211_color_change_settings *cca, u64 *changed) { struct cfg80211_mbssid_elems *mbssid = NULL; struct cfg80211_rnr_elems *rnr = NULL; struct beacon_data *new, *old; int new_head_len, new_tail_len; int size, err; u64 _changed = BSS_CHANGED_BEACON; struct ieee80211_bss_conf *link_conf = link->conf; old = sdata_dereference(link->u.ap.beacon, sdata); /* Need to have a beacon head if we don't have one yet */ if (!params->head && !old) return -EINVAL; /* new or old head? */ if (params->head) new_head_len = params->head_len; else new_head_len = old->head_len; /* new or old tail? 
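 * (keep the previously configured tail when the update does not provide one)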
*/ if (params->tail || !old) /* params->tail_len will be zero for !params->tail */ new_tail_len = params->tail_len; else new_tail_len = old->tail_len; size = sizeof(*new) + new_head_len + new_tail_len; if (params->mbssid_ies) { mbssid = params->mbssid_ies; size += struct_size(new->mbssid_ies, elem, mbssid->cnt); if (params->rnr_ies) { rnr = params->rnr_ies; size += struct_size(new->rnr_ies, elem, rnr->cnt); } size += ieee80211_get_mbssid_beacon_len(mbssid, rnr, mbssid->cnt); } new = kzalloc(size, GFP_KERNEL); if (!new) return -ENOMEM; /* start filling the new info now */ /* * pointers go into the block we allocated, * memory is | beacon_data | head | tail | mbssid_ies | rnr_ies */ new->head = ((u8 *) new) + sizeof(*new); new->tail = new->head + new_head_len; new->head_len = new_head_len; new->tail_len = new_tail_len; /* copy in optional mbssid_ies */ if (mbssid) { u8 *pos = new->tail + new->tail_len; new->mbssid_ies = (void *)pos; pos += struct_size(new->mbssid_ies, elem, mbssid->cnt); pos += ieee80211_copy_mbssid_beacon(pos, new->mbssid_ies, mbssid); if (rnr) { new->rnr_ies = (void *)pos; pos += struct_size(new->rnr_ies, elem, rnr->cnt); ieee80211_copy_rnr_beacon(pos, new->rnr_ies, rnr); } /* update bssid_indicator */ if (new->mbssid_ies->cnt && new->mbssid_ies->elem[0].len > 2) link_conf->bssid_indicator = *(new->mbssid_ies->elem[0].data + 2); else link_conf->bssid_indicator = 0; } if (csa) { new->cntdwn_current_counter = csa->count; memcpy(new->cntdwn_counter_offsets, csa->counter_offsets_beacon, csa->n_counter_offsets_beacon * sizeof(new->cntdwn_counter_offsets[0])); } else if (cca) { new->cntdwn_current_counter = cca->count; new->cntdwn_counter_offsets[0] = cca->counter_offset_beacon; } /* copy in head */ if (params->head) memcpy(new->head, params->head, new_head_len); else memcpy(new->head, old->head, new_head_len); /* copy in optional tail */ if (params->tail) memcpy(new->tail, params->tail, new_tail_len); else if (old) memcpy(new->tail, old->tail, new_tail_len); err = ieee80211_set_probe_resp(sdata, params->probe_resp, params->probe_resp_len, csa, cca, link); if (err < 0) { kfree(new); return err; } if (err == 0) _changed |= BSS_CHANGED_AP_PROBE_RESP; if (params->ftm_responder != -1) { link_conf->ftm_responder = params->ftm_responder; err = ieee80211_set_ftm_responder_params(sdata, params->lci, params->lci_len, params->civicloc, params->civicloc_len, link_conf); if (err < 0) { kfree(new); return err; } _changed |= BSS_CHANGED_FTM_RESPONDER; } rcu_assign_pointer(link->u.ap.beacon, new); sdata->u.ap.active = true; if (old) kfree_rcu(old, rcu_head); *changed |= _changed; return 0; } static u8 ieee80211_num_beaconing_links(struct ieee80211_sub_if_data *sdata) { struct ieee80211_link_data *link; u8 link_id, num = 0; if (sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_P2P_GO) return num; /* non-MLO mode of operation also uses link_id 0 in sdata so it is * safe to directly proceed with the below loop */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; if (sdata_dereference(link->u.ap.beacon, sdata)) num++; } return num; } static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct beacon_data *old; struct ieee80211_sub_if_data *vlan; u64 changed = BSS_CHANGED_BEACON_INT | 
BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON | BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER | BSS_CHANGED_TWT; int i, err; int prev_beacon_int; unsigned int link_id = params->beacon.link_id; struct ieee80211_link_data *link; struct ieee80211_bss_conf *link_conf; struct ieee80211_chan_req chanreq = { .oper = params->chandef }; u64 tsf; lockdep_assert_wiphy(local->hw.wiphy); link = sdata_dereference(sdata->link[link_id], sdata); if (!link) return -ENOLINK; link_conf = link->conf; old = sdata_dereference(link->u.ap.beacon, sdata); if (old) return -EALREADY; link->smps_mode = IEEE80211_SMPS_OFF; link->needed_rx_chains = sdata->local->rx_chains; prev_beacon_int = link_conf->beacon_int; link_conf->beacon_int = params->beacon_interval; if (params->ht_cap) link_conf->ht_ldpc = params->ht_cap->cap_info & cpu_to_le16(IEEE80211_HT_CAP_LDPC_CODING); if (params->vht_cap) { link_conf->vht_ldpc = params->vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC); link_conf->vht_su_beamformer = params->vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); link_conf->vht_su_beamformee = params->vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); link_conf->vht_mu_beamformer = params->vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); link_conf->vht_mu_beamformee = params->vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); } if (params->he_cap && params->he_oper) { link_conf->he_support = true; link_conf->htc_trig_based_pkt_ext = le32_get_bits(params->he_oper->he_oper_params, IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK); link_conf->frame_time_rts_th = le32_get_bits(params->he_oper->he_oper_params, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); changed |= BSS_CHANGED_HE_OBSS_PD; if (params->beacon.he_bss_color.enabled) changed |= BSS_CHANGED_HE_BSS_COLOR; } if (params->he_cap) { link_conf->he_ldpc = params->he_cap->phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; link_conf->he_su_beamformer = params->he_cap->phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; link_conf->he_su_beamformee = params->he_cap->phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; link_conf->he_mu_beamformer = params->he_cap->phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; link_conf->he_full_ul_mumimo = params->he_cap->phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; } if (params->eht_cap) { if (!link_conf->he_support) return -EOPNOTSUPP; link_conf->eht_support = true; link_conf->eht_su_beamformer = params->eht_cap->fixed.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER; link_conf->eht_su_beamformee = params->eht_cap->fixed.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; link_conf->eht_mu_beamformer = params->eht_cap->fixed.phy_cap_info[7] & (IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); link_conf->eht_80mhz_full_bw_ul_mumimo = params->eht_cap->fixed.phy_cap_info[7] & (IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ); link_conf->eht_disable_mcs15 = u8_get_bits(params->eht_oper->params, IEEE80211_EHT_OPER_MCS15_DISABLE); } else { link_conf->eht_su_beamformer = false; link_conf->eht_su_beamformee = false; link_conf->eht_mu_beamformer = false; } if (sdata->vif.type == NL80211_IFTYPE_AP && params->mbssid_config.tx_wdev) { err = ieee80211_set_ap_mbssid_options(sdata, &params->mbssid_config,
link_conf); if (err) return err; } err = ieee80211_link_use_channel(link, &chanreq, IEEE80211_CHANCTX_SHARED); if (!err) ieee80211_link_copy_chanctx_to_vlans(link, false); if (err) { link_conf->beacon_int = prev_beacon_int; return err; } /* * Apply control port protocol, this allows us to * not encrypt dynamic WEP control frames. */ sdata->control_port_protocol = params->crypto.control_port_ethertype; sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt; sdata->control_port_over_nl80211 = params->crypto.control_port_over_nl80211; sdata->control_port_no_preauth = params->crypto.control_port_no_preauth; list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { vlan->control_port_protocol = params->crypto.control_port_ethertype; vlan->control_port_no_encrypt = params->crypto.control_port_no_encrypt; vlan->control_port_over_nl80211 = params->crypto.control_port_over_nl80211; vlan->control_port_no_preauth = params->crypto.control_port_no_preauth; } link_conf->dtim_period = params->dtim_period; link_conf->enable_beacon = true; link_conf->allow_p2p_go_ps = sdata->vif.p2p; link_conf->twt_responder = params->twt_responder; link_conf->he_obss_pd = params->he_obss_pd; link_conf->he_bss_color = params->beacon.he_bss_color; link_conf->s1g_long_beacon_period = params->s1g_long_beacon_period; sdata->vif.cfg.s1g = params->chandef.chan->band == NL80211_BAND_S1GHZ; sdata->vif.cfg.ssid_len = params->ssid_len; if (params->ssid_len) memcpy(sdata->vif.cfg.ssid, params->ssid, params->ssid_len); link_conf->hidden_ssid = (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); memset(&link_conf->p2p_noa_attr, 0, sizeof(link_conf->p2p_noa_attr)); link_conf->p2p_noa_attr.oppps_ctwindow = params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK; if (params->p2p_opp_ps) link_conf->p2p_noa_attr.oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; sdata->beacon_rate_set = false; if (wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY)) { for (i = 0; i < NUM_NL80211_BANDS; i++) { sdata->beacon_rateidx_mask[i] = params->beacon_rate.control[i].legacy; if (sdata->beacon_rateidx_mask[i]) sdata->beacon_rate_set = true; } } if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) link_conf->beacon_tx_rate = params->beacon_rate; err = ieee80211_assign_beacon(sdata, link, &params->beacon, NULL, NULL, &changed); if (err < 0) goto error; err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery, link, link_conf, &changed); if (err < 0) goto error; err = ieee80211_set_unsol_bcast_probe_resp(sdata, &params->unsol_bcast_probe_resp, link, link_conf, &changed); if (err < 0) goto error; if (sdata->vif.cfg.s1g) { err = ieee80211_set_s1g_short_beacon(sdata, link, &params->s1g_short_beacon); if (err < 0) goto error; } err = drv_start_ap(sdata->local, sdata, link_conf); if (err) { old = sdata_dereference(link->u.ap.beacon, sdata); if (old) kfree_rcu(old, rcu_head); RCU_INIT_POINTER(link->u.ap.beacon, NULL); if (ieee80211_num_beaconing_links(sdata) == 0) sdata->u.ap.active = false; goto error; } tsf = drv_get_tsf(local, sdata); ieee80211_recalc_dtim(sdata, tsf); if (link->u.ap.s1g_short_beacon) ieee80211_recalc_sb_count(sdata, tsf); ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID); ieee80211_link_info_change_notify(sdata, link, changed); if (ieee80211_num_beaconing_links(sdata) <= 1) netif_carrier_on(dev); list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) netif_carrier_on(vlan->dev); return 0; error: ieee80211_link_release_channel(link); return err; } static int
ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_update *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link; struct cfg80211_beacon_data *beacon = &params->beacon; struct beacon_data *old; int err; struct ieee80211_bss_conf *link_conf; u64 changed = 0; lockdep_assert_wiphy(wiphy); link = sdata_dereference(sdata->link[beacon->link_id], sdata); if (!link) return -ENOLINK; link_conf = link->conf; /* don't allow changing the beacon while a countdown is in place - offset * of channel switch counter may change */ if (link_conf->csa_active || link_conf->color_change_active) return -EBUSY; old = sdata_dereference(link->u.ap.beacon, sdata); if (!old) return -ENOENT; err = ieee80211_assign_beacon(sdata, link, beacon, NULL, NULL, &changed); if (err < 0) return err; err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery, link, link_conf, &changed); if (err < 0) return err; err = ieee80211_set_unsol_bcast_probe_resp(sdata, &params->unsol_bcast_probe_resp, link, link_conf, &changed); if (err < 0) return err; if (link->u.ap.s1g_short_beacon) { err = ieee80211_set_s1g_short_beacon(sdata, link, &params->s1g_short_beacon); if (err < 0) return err; } if (beacon->he_bss_color_valid && beacon->he_bss_color.enabled != link_conf->he_bss_color.enabled) { link_conf->he_bss_color.enabled = beacon->he_bss_color.enabled; changed |= BSS_CHANGED_HE_BSS_COLOR; } ieee80211_link_info_change_notify(sdata, link, changed); return 0; } static void ieee80211_free_next_beacon(struct ieee80211_link_data *link) { if (!link->u.ap.next_beacon) return; kfree(link->u.ap.next_beacon->mbssid_ies); kfree(link->u.ap.next_beacon->rnr_ies); kfree(link->u.ap.next_beacon); link->u.ap.next_beacon = NULL; } static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_sub_if_data *vlan; struct ieee80211_local *local = sdata->local; struct beacon_data *old_beacon; struct probe_resp *old_probe_resp; struct fils_discovery_data *old_fils_discovery; struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp; struct s1g_short_beacon_data *old_s1g_short_beacon; struct cfg80211_chan_def chandef; struct ieee80211_link_data *link = sdata_dereference(sdata->link[link_id], sdata); struct ieee80211_bss_conf *link_conf = link->conf; LIST_HEAD(keys); lockdep_assert_wiphy(local->hw.wiphy); old_beacon = sdata_dereference(link->u.ap.beacon, sdata); if (!old_beacon) return -ENOENT; old_probe_resp = sdata_dereference(link->u.ap.probe_resp, sdata); old_fils_discovery = sdata_dereference(link->u.ap.fils_discovery, sdata); old_unsol_bcast_probe_resp = sdata_dereference(link->u.ap.unsol_bcast_probe_resp, sdata); old_s1g_short_beacon = sdata_dereference(link->u.ap.s1g_short_beacon, sdata); /* abort any running channel switch or color change */ link_conf->csa_active = false; link_conf->color_change_active = false; ieee80211_vif_unblock_queues_csa(sdata); ieee80211_free_next_beacon(link); /* turn off carrier for this interface and dependent VLANs */ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) netif_carrier_off(vlan->dev); if (ieee80211_num_beaconing_links(sdata) <= 1) { netif_carrier_off(dev); sdata->u.ap.active = false; } /* remove beacon and probe response */ RCU_INIT_POINTER(link->u.ap.beacon, NULL); RCU_INIT_POINTER(link->u.ap.probe_resp, NULL); RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
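/* also clear the remaining templates; the old entries fetched above are freed via kfree_rcu() below */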
RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL); RCU_INIT_POINTER(link->u.ap.s1g_short_beacon, NULL); kfree_rcu(old_beacon, rcu_head); if (old_probe_resp) kfree_rcu(old_probe_resp, rcu_head); if (old_fils_discovery) kfree_rcu(old_fils_discovery, rcu_head); if (old_unsol_bcast_probe_resp) kfree_rcu(old_unsol_bcast_probe_resp, rcu_head); if (old_s1g_short_beacon) kfree_rcu(old_s1g_short_beacon, rcu_head); kfree(link_conf->ftmr_params); link_conf->ftmr_params = NULL; link_conf->bssid_index = 0; link_conf->nontransmitted = false; link_conf->ema_ap = false; link_conf->bssid_indicator = 0; link_conf->fils_discovery.min_interval = 0; link_conf->fils_discovery.max_interval = 0; link_conf->unsol_bcast_probe_resp_interval = 0; __sta_info_flush(sdata, true, link_id, NULL); ieee80211_remove_link_keys(link, &keys); if (!list_empty(&keys)) { synchronize_net(); ieee80211_free_key_list(local, &keys); } ieee80211_stop_mbssid(sdata); RCU_INIT_POINTER(link_conf->tx_bss_conf, NULL); link_conf->enable_beacon = false; sdata->beacon_rate_set = false; sdata->vif.cfg.ssid_len = 0; sdata->vif.cfg.s1g = false; clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_BEACON_ENABLED); if (sdata->wdev.links[link_id].cac_started) { chandef = link_conf->chanreq.oper; wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, link_id); } drv_stop_ap(sdata->local, sdata, link_conf); /* free all potentially still buffered bcast frames */ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); ieee80211_link_copy_chanctx_to_vlans(link, true); ieee80211_link_release_channel(link); return 0; } static int sta_apply_auth_flags(struct ieee80211_local *local, struct sta_info *sta, u32 mask, u32 set) { int ret; if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) && set & BIT(NL80211_STA_FLAG_AUTHENTICATED) && !test_sta_flag(sta, WLAN_STA_AUTH)) { ret = sta_info_move_state(sta, IEEE80211_STA_AUTH); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) && set & BIT(NL80211_STA_FLAG_ASSOCIATED) && !test_sta_flag(sta, WLAN_STA_ASSOC)) { /* * When peer becomes associated, init rate control as * well. Some drivers require rate control initialized * before drv_sta_state() is called. 
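 * (rate_control_rate_init_all_links() below runs before the state transition for that reason)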
*/ if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) rate_control_rate_init_all_links(sta); ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); else ret = 0; if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) && !(set & BIT(NL80211_STA_FLAG_ASSOCIATED)) && test_sta_flag(sta, WLAN_STA_ASSOC)) { ret = sta_info_move_state(sta, IEEE80211_STA_AUTH); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) && !(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) && test_sta_flag(sta, WLAN_STA_AUTH)) { ret = sta_info_move_state(sta, IEEE80211_STA_NONE); if (ret) return ret; } return 0; } static void sta_apply_mesh_params(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) { #ifdef CONFIG_MAC80211_MESH struct ieee80211_sub_if_data *sdata = sta->sdata; u64 changed = 0; if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { switch (params->plink_state) { case NL80211_PLINK_ESTAB: if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) changed = mesh_plink_inc_estab_count(sdata); sta->mesh->plink_state = params->plink_state; sta->mesh->aid = params->peer_aid; ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, sdata->u.mesh.mshcfg.power_mode); ewma_mesh_tx_rate_avg_init(&sta->mesh->tx_rate_avg); /* init at low value */ ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg, 10); break; case NL80211_PLINK_LISTEN: case NL80211_PLINK_BLOCKED: case NL80211_PLINK_OPN_SNT: case NL80211_PLINK_OPN_RCVD: case NL80211_PLINK_CNF_RCVD: case NL80211_PLINK_HOLDING: if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) changed = mesh_plink_dec_estab_count(sdata); sta->mesh->plink_state = params->plink_state; ieee80211_mps_sta_status_update(sta); changed |= ieee80211_mps_set_sta_local_pm(sta, NL80211_MESH_POWER_UNKNOWN); break; default: /* nothing */ break; } } switch (params->plink_action) { case NL80211_PLINK_ACTION_NO_ACTION: /* nothing */ break; case NL80211_PLINK_ACTION_OPEN: changed |= mesh_plink_open(sta); break; case NL80211_PLINK_ACTION_BLOCK: changed |= mesh_plink_block(sta); break; } if (params->local_pm) changed |= ieee80211_mps_set_sta_local_pm(sta, params->local_pm); ieee80211_mbss_info_change_notify(sdata, changed); #endif } enum sta_link_apply_mode { STA_LINK_MODE_NEW, STA_LINK_MODE_STA_MODIFY, STA_LINK_MODE_LINK_MODIFY, }; static int sta_link_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, enum sta_link_apply_mode mode, struct link_station_parameters *params) { struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; u32 link_id = params->link_id < 0 ? 
0 : params->link_id; struct ieee80211_link_data *link = sdata_dereference(sdata->link[link_id], sdata); struct link_sta_info *link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&local->hw.wiphy->mtx)); bool changes = params->link_mac || params->txpwr_set || params->supported_rates_len || params->ht_capa || params->vht_capa || params->he_capa || params->eht_capa || params->s1g_capa || params->opmode_notif_used; switch (mode) { case STA_LINK_MODE_NEW: if (!params->link_mac) return -EINVAL; break; case STA_LINK_MODE_LINK_MODIFY: break; case STA_LINK_MODE_STA_MODIFY: if (params->link_id >= 0) break; if (!changes) return 0; break; } if (!link || !link_sta) return -EINVAL; sband = ieee80211_get_link_sband(link); if (!sband) return -EINVAL; if (params->link_mac) { if (mode == STA_LINK_MODE_NEW) { memcpy(link_sta->addr, params->link_mac, ETH_ALEN); memcpy(link_sta->pub->addr, params->link_mac, ETH_ALEN); } else if (!ether_addr_equal(link_sta->addr, params->link_mac)) { return -EINVAL; } } if (params->txpwr_set) { int ret; link_sta->pub->txpwr.type = params->txpwr.type; if (params->txpwr.type == NL80211_TX_POWER_LIMITED) link_sta->pub->txpwr.power = params->txpwr.power; ret = drv_sta_set_txpwr(local, sdata, sta); if (ret) return ret; } if (params->supported_rates && params->supported_rates_len && !ieee80211_parse_bitrates(link->conf->chanreq.oper.width, sband, params->supported_rates, params->supported_rates_len, &link_sta->pub->supp_rates[sband->band])) return -EINVAL; if (params->ht_capa) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, params->ht_capa, link_sta); /* VHT can override some HT caps such as the A-MSDU max length */ if (params->vht_capa) ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, params->vht_capa, NULL, link_sta); if (params->he_capa) ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, (void *)params->he_capa, params->he_capa_len, (void *)params->he_6ghz_capa, link_sta); if (params->he_capa && params->eht_capa) ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband, (u8 *)params->he_capa, params->he_capa_len, params->eht_capa, params->eht_capa_len, link_sta); if (params->s1g_capa) ieee80211_s1g_cap_to_sta_s1g_cap(sdata, params->s1g_capa, link_sta); ieee80211_sta_init_nss(link_sta); if (params->opmode_notif_used) { enum nl80211_chan_width width = link->conf->chanreq.oper.width; switch (width) { case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_320: /* not VHT, allowed for HE/EHT */ break; default: return -EINVAL; } /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it */ __ieee80211_vht_handle_opmode(sdata, link_sta, params->opmode_notif, sband->band); } return 0; } static int sta_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) { struct ieee80211_sub_if_data *sdata = sta->sdata; u32 mask, set; int ret = 0; mask = params->sta_flags_mask; set = params->sta_flags_set; if (ieee80211_vif_is_mesh(&sdata->vif)) { /* * In mesh mode, ASSOCIATED isn't part of the nl80211 * API but must follow AUTHENTICATED for driver state. 
*/ if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) mask |= BIT(NL80211_STA_FLAG_ASSOCIATED); if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) set |= BIT(NL80211_STA_FLAG_ASSOCIATED); } else if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* * TDLS -- everything follows authorized, but * only becoming authorized is possible, not * going back */ if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) { set |= BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED); mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED); } } if (mask & BIT(NL80211_STA_FLAG_WME) && local->hw.queues >= IEEE80211_NUM_ACS) sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME); /* auth flags will be set later for TDLS, * and for unassociated stations that move to associated */ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && !((mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) && (set & BIT(NL80211_STA_FLAG_ASSOCIATED)))) { ret = sta_apply_auth_flags(local, sta, mask, set); if (ret) return ret; } if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); else clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); } if (mask & BIT(NL80211_STA_FLAG_MFP)) { sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP)); if (set & BIT(NL80211_STA_FLAG_MFP)) set_sta_flag(sta, WLAN_STA_MFP); else clear_sta_flag(sta, WLAN_STA_MFP); } if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) { if (set & BIT(NL80211_STA_FLAG_TDLS_PEER)) set_sta_flag(sta, WLAN_STA_TDLS_PEER); else clear_sta_flag(sta, WLAN_STA_TDLS_PEER); } if (mask & BIT(NL80211_STA_FLAG_SPP_AMSDU)) sta->sta.spp_amsdu = set & BIT(NL80211_STA_FLAG_SPP_AMSDU); /* mark TDLS channel switch support, if the AP allows it */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && !sdata->deflink.u.mgd.tdls_chan_switch_prohibited && params->ext_capab_len >= 4 && params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH); if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && !sdata->u.mgd.tdls_wider_bw_prohibited && ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && params->ext_capab_len >= 8 && params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW); if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) { sta->sta.uapsd_queues = params->uapsd_queues; sta->sta.max_sp = params->max_sp; } ieee80211_sta_set_max_amsdu_subframes(sta, params->ext_capab, params->ext_capab_len); /* * cfg80211 validates this (1-2007) and allows setting the AID * only when creating a new station entry. For S1G APs, the current * implementation supports a maximum of 1600 AIDs. */ if (params->aid) { if (sdata->vif.cfg.s1g && params->aid > IEEE80211_MAX_SUPPORTED_S1G_AID) return -EINVAL; sta->sta.aid = params->aid; } /* * Some of the following updates would be racy if called on an * existing station, via ieee80211_change_station(). However, * all such changes are rejected by cfg80211 except for updates * changing the supported rates on an existing but not yet used * TDLS peer. 
if (params->listen_interval >= 0) sta->listen_interval = params->listen_interval; if (params->eml_cap_present) sta->sta.eml_cap = params->eml_cap; ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_STA_MODIFY, &params->link_sta_params); if (ret) return ret; if (params->support_p2p_ps >= 0) sta->sta.support_p2p_ps = params->support_p2p_ps; if (ieee80211_vif_is_mesh(&sdata->vif)) sta_apply_mesh_params(local, sta, params); if (params->airtime_weight) sta->airtime_weight = params->airtime_weight; /* set the STA state after all sta info from usermode has been set */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) || set & BIT(NL80211_STA_FLAG_ASSOCIATED)) { ret = sta_apply_auth_flags(local, sta, mask, set); if (ret) return ret; } /* Mark the STA as MLO if MLD MAC address is available */ if (params->link_sta_params.mld_mac) sta->sta.mlo = true; return 0; } static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; struct ieee80211_sub_if_data *sdata; int err; lockdep_assert_wiphy(local->hw.wiphy); if (params->vlan) { sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP) return -EINVAL; } else sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (ether_addr_equal(mac, sdata->vif.addr)) return -EINVAL; if (!is_valid_ether_addr(mac)) return -EINVAL; if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) && sdata->vif.type == NL80211_IFTYPE_STATION && !sdata->u.mgd.associated) return -EINVAL; /* * If we have a link ID, it can be a non-MLO station on an AP MLD, * but we need to have a link_mac in that case as well, so use the * STA's MAC address in that case. */ if (params->link_sta_params.link_id >= 0) sta = sta_info_alloc_with_link(sdata, mac, params->link_sta_params.link_id, params->link_sta_params.link_mac ?: mac, GFP_KERNEL); else sta = sta_info_alloc(sdata, mac, GFP_KERNEL); if (!sta) return -ENOMEM; if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) sta->sta.tdls = true; /* Though the mutex is not needed here (since the station is not * visible yet), sta_apply_parameters (and inner functions) require * the mutex due to other paths.
*/ err = sta_apply_parameters(local, sta, params); if (err) { sta_info_free(local, sta); return err; } /* * for TDLS and for unassociated station, rate control should be * initialized only when rates are known and station is marked * authorized/associated */ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && test_sta_flag(sta, WLAN_STA_ASSOC)) rate_control_rate_init_all_links(sta); return sta_info_insert(sta); } static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (params->mac) return sta_info_destroy_addr_bss(sdata, params->mac); sta_info_flush(sdata, params->link_id); return 0; } static int ieee80211_change_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; struct ieee80211_sub_if_data *vlansdata; enum cfg80211_station_type statype; int err; lockdep_assert_wiphy(local->hw.wiphy); sta = sta_info_get_bss(sdata, mac); if (!sta) return -ENOENT; switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: if (sdata->u.mesh.user_mpm) statype = CFG80211_STA_MESH_PEER_USER; else statype = CFG80211_STA_MESH_PEER_KERNEL; break; case NL80211_IFTYPE_ADHOC: statype = CFG80211_STA_IBSS; break; case NL80211_IFTYPE_STATION: if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { statype = CFG80211_STA_AP_STA; break; } if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) statype = CFG80211_STA_TDLS_PEER_ACTIVE; else statype = CFG80211_STA_TDLS_PEER_SETUP; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: if (test_sta_flag(sta, WLAN_STA_ASSOC)) statype = CFG80211_STA_AP_CLIENT; else statype = CFG80211_STA_AP_CLIENT_UNASSOC; break; default: return -EOPNOTSUPP; } err = cfg80211_check_station_change(wiphy, params, statype); if (err) return err; if (params->vlan && params->vlan != sta->sdata->dev) { vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (params->vlan->ieee80211_ptr->use_4addr) { if (vlansdata->u.vlan.sta) return -EBUSY; rcu_assign_pointer(vlansdata->u.vlan.sta, sta); __ieee80211_check_fast_rx_iface(vlansdata); drv_sta_set_4addr(local, sta->sdata, &sta->sta, true); } if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sta->sdata->u.vlan.sta) RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ieee80211_vif_dec_num_mcast(sta->sdata); sta->sdata = vlansdata; ieee80211_check_fast_rx(sta); ieee80211_check_fast_xmit(sta); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { ieee80211_vif_inc_num_mcast(sta->sdata); cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); } } err = sta_apply_parameters(local, sta, params); if (err) return err; if (sdata->vif.type == NL80211_IFTYPE_STATION && params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); } return 0; } #ifdef CONFIG_MAC80211_MESH static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; struct sta_info *sta; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); sta = sta_info_get(sdata, next_hop); if (!sta) { rcu_read_unlock(); return -ENOENT; } mpath = mesh_path_add(sdata, dst); if (IS_ERR(mpath)) { rcu_read_unlock(); return PTR_ERR(mpath); } mesh_path_fix_nexthop(mpath, sta); rcu_read_unlock(); return 
0; } static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, const u8 *dst) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (dst) return mesh_path_del(sdata, dst); mesh_path_flush_by_iface(sdata); return 0; } static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; struct sta_info *sta; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); sta = sta_info_get(sdata, next_hop); if (!sta) { rcu_read_unlock(); return -ENOENT; } mpath = mesh_path_lookup(sdata, dst); if (!mpath) { rcu_read_unlock(); return -ENOENT; } mesh_path_fix_nexthop(mpath, sta); rcu_read_unlock(); return 0; } static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, struct mpath_info *pinfo) { struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop); if (next_hop_sta) memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN); else eth_zero_addr(next_hop); memset(pinfo, 0, sizeof(*pinfo)); pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation; pinfo->filled = MPATH_INFO_FRAME_QLEN | MPATH_INFO_SN | MPATH_INFO_METRIC | MPATH_INFO_EXPTIME | MPATH_INFO_DISCOVERY_TIMEOUT | MPATH_INFO_DISCOVERY_RETRIES | MPATH_INFO_FLAGS | MPATH_INFO_HOP_COUNT | MPATH_INFO_PATH_CHANGE; pinfo->frame_qlen = mpath->frame_queue.qlen; pinfo->sn = mpath->sn; pinfo->metric = mpath->metric; if (time_before(jiffies, mpath->exp_time)) pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); pinfo->discovery_timeout = jiffies_to_msecs(mpath->discovery_timeout); pinfo->discovery_retries = mpath->discovery_retries; if (mpath->flags & MESH_PATH_ACTIVE) pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; if (mpath->flags & MESH_PATH_RESOLVING) pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; if (mpath->flags & MESH_PATH_SN_VALID) pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; if (mpath->flags & MESH_PATH_FIXED) pinfo->flags |= NL80211_MPATH_FLAG_FIXED; if (mpath->flags & MESH_PATH_RESOLVED) pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED; pinfo->hop_count = mpath->hop_count; pinfo->path_change_count = mpath->path_change_count; } static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mesh_path_lookup(sdata, dst); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpath_set_pinfo(mpath, next_hop, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mesh_path_lookup_by_idx(sdata, idx); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpath_set_pinfo(mpath, next_hop, pinfo); rcu_read_unlock(); return 0; } static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp, struct mpath_info *pinfo) { memset(pinfo, 0, sizeof(*pinfo)); memcpy(mpp, mpath->mpp, ETH_ALEN); pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation; } static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mpp_path_lookup(sdata, 
dst); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpp_set_pinfo(mpath, mpp, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mpp_path_lookup_by_idx(sdata, idx); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpp_set_pinfo(mpath, mpp, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_get_mesh_config(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); return 0; } static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, u32 mask) { return (mask >> (parm-1)) & 0x1; } static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, const struct mesh_setup *setup) { u8 *new_ie; struct ieee80211_sub_if_data *sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh); int i; /* allocate information elements */ new_ie = NULL; if (setup->ie_len) { new_ie = kmemdup(setup->ie, setup->ie_len, GFP_KERNEL); if (!new_ie) return -ENOMEM; } ifmsh->ie_len = setup->ie_len; ifmsh->ie = new_ie; /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); ifmsh->mesh_sp_id = setup->sync_method; ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->user_mpm = setup->user_mpm; ifmsh->mesh_auth_id = setup->auth_id; ifmsh->security = IEEE80211_MESH_SEC_NONE; ifmsh->userspace_handles_dfs = setup->userspace_handles_dfs; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; if (setup->is_secure) ifmsh->security |= IEEE80211_MESH_SEC_SECURED; /* mcast rate setting in Mesh Node */ memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, sizeof(setup->mcast_rate)); sdata->vif.bss_conf.basic_rates = setup->basic_rates; sdata->vif.bss_conf.beacon_int = setup->beacon_interval; sdata->vif.bss_conf.dtim_period = setup->dtim_period; sdata->beacon_rate_set = false; if (wiphy_ext_feature_isset(sdata->local->hw.wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY)) { for (i = 0; i < NUM_NL80211_BANDS; i++) { sdata->beacon_rateidx_mask[i] = setup->beacon_rate.control[i].legacy; if (sdata->beacon_rateidx_mask[i]) sdata->beacon_rate_set = true; } } return 0; } static int ieee80211_update_mesh_config(struct wiphy *wiphy, struct net_device *dev, u32 mask, const struct mesh_config *nconf) { struct mesh_config *conf; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_mesh *ifmsh; sdata = IEEE80211_DEV_TO_SUB_IF(dev); ifmsh = &sdata->u.mesh; /* Set the config options which we are interested in setting */ conf = &(sdata->u.mesh.mshcfg); if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) conf->dot11MeshRetryTimeout = nconf->dot11MeshRetryTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_CONFIRM_TIMEOUT, mask)) conf->dot11MeshConfirmTimeout = nconf->dot11MeshConfirmTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HOLDING_TIMEOUT, mask)) conf->dot11MeshHoldingTimeout = nconf->dot11MeshHoldingTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_MAX_PEER_LINKS, mask)) conf->dot11MeshMaxPeerLinks = nconf->dot11MeshMaxPeerLinks; if (_chg_mesh_attr(NL80211_MESHCONF_MAX_RETRIES, mask)) conf->dot11MeshMaxRetries = 
nconf->dot11MeshMaxRetries; if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask)) conf->dot11MeshTTL = nconf->dot11MeshTTL; if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) conf->element_ttl = nconf->element_ttl; if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) { if (ifmsh->user_mpm) return -EBUSY; conf->auto_open_plinks = nconf->auto_open_plinks; } if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) conf->dot11MeshNbrOffsetMaxNeighbor = nconf->dot11MeshNbrOffsetMaxNeighbor; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) conf->dot11MeshHWMPmaxPREQretries = nconf->dot11MeshHWMPmaxPREQretries; if (_chg_mesh_attr(NL80211_MESHCONF_PATH_REFRESH_TIME, mask)) conf->path_refresh_time = nconf->path_refresh_time; if (_chg_mesh_attr(NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, mask)) conf->min_discovery_timeout = nconf->min_discovery_timeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, mask)) conf->dot11MeshHWMPactivePathTimeout = nconf->dot11MeshHWMPactivePathTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask)) conf->dot11MeshHWMPpreqMinInterval = nconf->dot11MeshHWMPpreqMinInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask)) conf->dot11MeshHWMPperrMinInterval = nconf->dot11MeshHWMPperrMinInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, mask)) conf->dot11MeshHWMPnetDiameterTraversalTime = nconf->dot11MeshHWMPnetDiameterTraversalTime; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) { conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; ieee80211_mesh_root_setup(ifmsh); } if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) { /* our current gate announcement implementation rides on root * announcements, so require this ifmsh to also be a root node * */ if (nconf->dot11MeshGateAnnouncementProtocol && !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) { conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN; ieee80211_mesh_root_setup(ifmsh); } conf->dot11MeshGateAnnouncementProtocol = nconf->dot11MeshGateAnnouncementProtocol; } if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) conf->dot11MeshHWMPRannInterval = nconf->dot11MeshHWMPRannInterval; if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask)) conf->dot11MeshForwarding = nconf->dot11MeshForwarding; if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) { /* our RSSI threshold implementation is supported only for * devices that report signal in dBm. 
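 * (i.e. the hardware must advertise IEEE80211_HW_SIGNAL_DBM)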
*/ if (!ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM)) return -EOPNOTSUPP; conf->rssi_threshold = nconf->rssi_threshold; } if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) { conf->ht_opmode = nconf->ht_opmode; sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode; ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_HT); } if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask)) conf->dot11MeshHWMPactivePathToRootTimeout = nconf->dot11MeshHWMPactivePathToRootTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask)) conf->dot11MeshHWMProotInterval = nconf->dot11MeshHWMProotInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask)) conf->dot11MeshHWMPconfirmationInterval = nconf->dot11MeshHWMPconfirmationInterval; if (_chg_mesh_attr(NL80211_MESHCONF_POWER_MODE, mask)) { conf->power_mode = nconf->power_mode; ieee80211_mps_local_status_update(sdata); } if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask)) conf->dot11MeshAwakeWindowDuration = nconf->dot11MeshAwakeWindowDuration; if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask)) conf->plink_timeout = nconf->plink_timeout; if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_GATE, mask)) conf->dot11MeshConnectedToMeshGate = nconf->dot11MeshConnectedToMeshGate; if (_chg_mesh_attr(NL80211_MESHCONF_NOLEARN, mask)) conf->dot11MeshNolearn = nconf->dot11MeshNolearn; if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_AS, mask)) conf->dot11MeshConnectedToAuthServer = nconf->dot11MeshConnectedToAuthServer; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_chan_req chanreq = { .oper = setup->chandef }; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; int err; lockdep_assert_wiphy(sdata->local->hw.wiphy); memcpy(&ifmsh->mshcfg, conf, sizeof(struct mesh_config)); err = copy_mesh_setup(ifmsh, setup); if (err) return err; sdata->control_port_over_nl80211 = setup->control_port_over_nl80211; /* can mesh use other SMPS modes? 
*/ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; sdata->deflink.needed_rx_chains = sdata->local->rx_chains; err = ieee80211_link_use_channel(&sdata->deflink, &chanreq, IEEE80211_CHANCTX_SHARED); if (err) return err; return ieee80211_start_mesh(sdata); } static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); lockdep_assert_wiphy(sdata->local->hw.wiphy); ieee80211_stop_mesh(sdata); ieee80211_link_release_channel(&sdata->deflink); kfree(sdata->u.mesh.ie); return 0; } #endif static int ieee80211_change_bss(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link; struct ieee80211_supported_band *sband; u64 changed = 0; link = ieee80211_link_or_deflink(sdata, params->link_id, true); if (IS_ERR(link)) return PTR_ERR(link); if (!sdata_dereference(link->u.ap.beacon, sdata)) return -ENOENT; sband = ieee80211_get_link_sband(link); if (!sband) return -EINVAL; if (params->basic_rates) { if (!ieee80211_parse_bitrates(link->conf->chanreq.oper.width, wiphy->bands[sband->band], params->basic_rates, params->basic_rates_len, &link->conf->basic_rates)) return -EINVAL; changed |= BSS_CHANGED_BASIC_RATES; ieee80211_check_rate_mask(link); } if (params->use_cts_prot >= 0) { link->conf->use_cts_prot = params->use_cts_prot; changed |= BSS_CHANGED_ERP_CTS_PROT; } if (params->use_short_preamble >= 0) { link->conf->use_short_preamble = params->use_short_preamble; changed |= BSS_CHANGED_ERP_PREAMBLE; } if (!link->conf->use_short_slot && (sband->band == NL80211_BAND_5GHZ || sband->band == NL80211_BAND_6GHZ)) { link->conf->use_short_slot = true; changed |= BSS_CHANGED_ERP_SLOT; } if (params->use_short_slot_time >= 0) { link->conf->use_short_slot = params->use_short_slot_time; changed |= BSS_CHANGED_ERP_SLOT; } if (params->ap_isolate >= 0) { if (params->ap_isolate) sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS; else sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS; ieee80211_check_fast_rx_iface(sdata); } if (params->ht_opmode >= 0) { link->conf->ht_operation_mode = (u16)params->ht_opmode; changed |= BSS_CHANGED_HT; } if (params->p2p_ctwindow >= 0) { link->conf->p2p_noa_attr.oppps_ctwindow &= ~IEEE80211_P2P_OPPPS_CTWINDOW_MASK; link->conf->p2p_noa_attr.oppps_ctwindow |= params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK; changed |= BSS_CHANGED_P2P_PS; } if (params->p2p_opp_ps > 0) { link->conf->p2p_noa_attr.oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; changed |= BSS_CHANGED_P2P_PS; } else if (params->p2p_opp_ps == 0) { link->conf->p2p_noa_attr.oppps_ctwindow &= ~IEEE80211_P2P_OPPPS_ENABLE_BIT; changed |= BSS_CHANGED_P2P_PS; } ieee80211_link_info_change_notify(sdata, link, changed); return 0; } static int ieee80211_set_txq_params(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_txq_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link = ieee80211_link_or_deflink(sdata, params->link_id, true); struct ieee80211_tx_queue_params p; if (!local->ops->conf_tx) return -EOPNOTSUPP; if (local->hw.queues < IEEE80211_NUM_ACS) return -EOPNOTSUPP; if (IS_ERR(link)) return PTR_ERR(link); memset(&p, 0, sizeof(p)); p.aifs = params->aifs; p.cw_max = params->cwmax; p.cw_min = params->cwmin; p.txop = params->txop; /* * Setting tx queue params disables u-apsd because it's only * called 
in master mode. */ p.uapsd = false; ieee80211_regulatory_limit_wmm_params(sdata, &p, params->ac); link->tx_conf[params->ac] = p; if (drv_conf_tx(local, link, params->ac, &p)) { wiphy_debug(local->hw.wiphy, "failed to set TX queue parameters for AC %d\n", params->ac); return -EINVAL; } ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_QOS); return 0; } #ifdef CONFIG_PM static int ieee80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan) { return __ieee80211_suspend(wiphy_priv(wiphy), wowlan); } static int ieee80211_resume(struct wiphy *wiphy) { return __ieee80211_resume(wiphy_priv(wiphy)); } #else #define ieee80211_suspend NULL #define ieee80211_resume NULL #endif static int ieee80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *req) { struct ieee80211_sub_if_data *sdata; struct ieee80211_link_data *link; struct ieee80211_channel *chan; int radio_idx; sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev); switch (ieee80211_vif_type_p2p(&sdata->vif)) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_P2P_GO: if (sdata->local->ops->hw_scan) break; /* * FIXME: implement NoA while scanning in software, * for now fall through to allow scanning only when * beaconing hasn't been configured yet */ fallthrough; case NL80211_IFTYPE_AP: /* * If the scan has been forced (and the driver supports * forcing), don't care about being beaconing already. * This will create problems to the attached stations (e.g. all * the frames sent while scanning on other channel will be * lost) */ for_each_link_data(sdata, link) { /* if the link is not beaconing, ignore it */ if (!sdata_dereference(link->u.ap.beacon, sdata)) continue; chan = link->conf->chanreq.oper.chan; radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan); if (ieee80211_is_radio_idx_in_scan_req(wiphy, req, radio_idx) && (!(wiphy->features & NL80211_FEATURE_AP_SCAN) || !(req->flags & NL80211_SCAN_FLAG_AP))) return -EOPNOTSUPP; } break; case NL80211_IFTYPE_NAN: default: return -EOPNOTSUPP; } return ieee80211_request_scan(sdata, req); } static void ieee80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev) { ieee80211_scan_cancel(wiphy_priv(wiphy)); } static int ieee80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *req) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!sdata->local->ops->sched_scan_start) return -EOPNOTSUPP; return ieee80211_request_sched_scan_start(sdata, req); } static int ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid) { struct ieee80211_local *local = wiphy_priv(wiphy); if (!local->ops->sched_scan_stop) return -EOPNOTSUPP; return ieee80211_request_sched_scan_stop(local); } static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req) { return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_assoc_request *req) { return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_deauth_request *req) { return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_disassoc_request *req) { return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), 
req); } static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params) { return ieee80211_ibss_join(IEEE80211_DEV_TO_SUB_IF(dev), params); } static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev)); } static int ieee80211_join_ocb(struct wiphy *wiphy, struct net_device *dev, struct ocb_setup *setup) { return ieee80211_ocb_join(IEEE80211_DEV_TO_SUB_IF(dev), setup); } static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev) { return ieee80211_ocb_leave(IEEE80211_DEV_TO_SUB_IF(dev)); } static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, int rate[NUM_NL80211_BANDS]) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(int) * NUM_NL80211_BANDS); if (ieee80211_sdata_running(sdata)) ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_MCAST_RATE); return 0; } static int ieee80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx, u32 changed) { struct ieee80211_local *local = wiphy_priv(wiphy); int err; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { ieee80211_check_fast_xmit_all(local); err = drv_set_frag_threshold(local, radio_idx, wiphy->frag_threshold); if (err) { ieee80211_check_fast_xmit_all(local); return err; } } if ((changed & WIPHY_PARAM_COVERAGE_CLASS) || (changed & WIPHY_PARAM_DYN_ACK)) { s16 coverage_class; coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ? wiphy->coverage_class : -1; err = drv_set_coverage_class(local, radio_idx, coverage_class); if (err) return err; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { u32 rts_threshold; if ((radio_idx == -1) || (radio_idx >= wiphy->n_radio)) rts_threshold = wiphy->rts_threshold; else rts_threshold = wiphy->radio_cfg[radio_idx].rts_threshold; err = drv_set_rts_threshold(local, radio_idx, rts_threshold); if (err) return err; } if (changed & WIPHY_PARAM_RETRY_SHORT) { if (wiphy->retry_short > IEEE80211_MAX_TX_RETRY) return -EINVAL; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; } if (changed & WIPHY_PARAM_RETRY_LONG) { if (wiphy->retry_long > IEEE80211_MAX_TX_RETRY) return -EINVAL; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; } if (changed & (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) ieee80211_hw_config(local, radio_idx, IEEE80211_CONF_CHANGE_RETRY_LIMITS); if (changed & (WIPHY_PARAM_TXQ_LIMIT | WIPHY_PARAM_TXQ_MEMORY_LIMIT | WIPHY_PARAM_TXQ_QUANTUM)) ieee80211_txq_set_params(local, radio_idx); return 0; } static int ieee80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int radio_idx, enum nl80211_tx_power_setting type, int mbm) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; enum nl80211_tx_power_setting txp_type = type; bool update_txp_type = false; bool has_monitor = false; int user_power_level; int old_power = local->user_power_level; lockdep_assert_wiphy(local->hw.wiphy); switch (type) { case NL80211_TX_POWER_AUTOMATIC: user_power_level = IEEE80211_UNSET_POWER_LEVEL; txp_type = NL80211_TX_POWER_LIMITED; break; case NL80211_TX_POWER_LIMITED: case NL80211_TX_POWER_FIXED: if (mbm < 0 || (mbm % 100)) return -EOPNOTSUPP; user_power_level = MBM_TO_DBM(mbm); break; default: return -EINVAL; } if (wdev) { sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (sdata->vif.type == NL80211_IFTYPE_MONITOR && !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) { if (!ieee80211_hw_check(&local->hw, 
WANT_MONITOR_VIF)) return -EOPNOTSUPP; sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); if (!sdata) return -EOPNOTSUPP; } for (int link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link = wiphy_dereference(wiphy, sdata->link[link_id]); if (!link) continue; link->user_power_level = user_power_level; if (txp_type != link->conf->txpower_type) { update_txp_type = true; link->conf->txpower_type = txp_type; } ieee80211_recalc_txpower(link, update_txp_type); } return 0; } local->user_power_level = user_power_level; list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR && !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) { has_monitor = true; continue; } for (int link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link = wiphy_dereference(wiphy, sdata->link[link_id]); if (!link) continue; link->user_power_level = local->user_power_level; if (txp_type != link->conf->txpower_type) update_txp_type = true; link->conf->txpower_type = txp_type; } } list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR && !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) continue; for (int link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link = wiphy_dereference(wiphy, sdata->link[link_id]); if (!link) continue; ieee80211_recalc_txpower(link, update_txp_type); } } if (has_monitor) { sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { sdata->deflink.user_power_level = local->user_power_level; if (txp_type != sdata->vif.bss_conf.txpower_type) update_txp_type = true; sdata->vif.bss_conf.txpower_type = txp_type; ieee80211_recalc_txpower(&sdata->deflink, update_txp_type); } } if (local->emulate_chanctx && (old_power != local->user_power_level)) ieee80211_hw_conf_chan(local); return 0; } static int ieee80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int radio_idx, unsigned int link_id, int *dbm) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct ieee80211_link_data *link_data; if (local->ops->get_txpower && (sdata->flags & IEEE80211_SDATA_IN_DRIVER)) return drv_get_txpower(local, sdata, link_id, dbm); if (local->emulate_chanctx) { *dbm = local->hw.conf.power_level; } else { link_data = wiphy_dereference(wiphy, sdata->link[link_id]); if (link_data) *dbm = link_data->conf->txpower; else return -ENOLINK; } /* INT_MIN indicates no power level was set yet */ if (*dbm == INT_MIN) return -EINVAL; return 0; } static void ieee80211_rfkill_poll(struct wiphy *wiphy) { struct ieee80211_local *local = wiphy_priv(wiphy); drv_rfkill_poll(local); } #ifdef CONFIG_NL80211_TESTMODE static int ieee80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_vif *vif = NULL; if (!local->ops->testmode_cmd) return -EOPNOTSUPP; if (wdev) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (sdata->flags & IEEE80211_SDATA_IN_DRIVER) vif = &sdata->vif; } return local->ops->testmode_cmd(&local->hw, vif, data, len); } static int ieee80211_testmode_dump(struct wiphy *wiphy, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); if (!local->ops->testmode_dump) 
return -EOPNOTSUPP; return local->ops->testmode_dump(&local->hw, skb, cb, data, len); } #endif int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, enum ieee80211_smps_mode smps_mode) { const u8 *ap; enum ieee80211_smps_mode old_req; int err; struct sta_info *sta; bool tdls_peer_found = false; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) return -EINVAL; if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) return 0; old_req = link->u.mgd.req_smps; link->u.mgd.req_smps = smps_mode; /* The driver indicated that EML is enabled for the interface, which * implies that SMPS flows towards the AP should be stopped. */ if (sdata->vif.driver_flags & IEEE80211_VIF_EML_ACTIVE) return 0; if (old_req == smps_mode && smps_mode != IEEE80211_SMPS_AUTOMATIC) return 0; /* * If not associated, or current association is not an HT * association, there's no need to do anything, just store * the new value until we associate. */ if (!sdata->u.mgd.associated || link->conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT) return 0; ap = sdata->vif.cfg.ap_addr; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) continue; tdls_peer_found = true; break; } rcu_read_unlock(); if (smps_mode == IEEE80211_SMPS_AUTOMATIC) { if (tdls_peer_found || !sdata->u.mgd.powersave) smps_mode = IEEE80211_SMPS_OFF; else smps_mode = IEEE80211_SMPS_DYNAMIC; } /* send SM PS frame to AP */ err = ieee80211_send_smps_action(sdata, smps_mode, ap, ap, ieee80211_vif_is_mld(&sdata->vif) ? link->link_id : -1); if (err) link->u.mgd.req_smps = old_req; else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found) ieee80211_teardown_tdls_peers(link); return err; } static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); unsigned int link_id; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) return -EOPNOTSUPP; if (enabled == sdata->u.mgd.powersave && timeout == local->dynamic_ps_forced_timeout) return 0; sdata->u.mgd.powersave = enabled; local->dynamic_ps_forced_timeout = timeout; /* no change, but if automatic follow powersave */ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); if (!link) continue; __ieee80211_request_smps_mgd(sdata, link, link->u.mgd.req_smps); } if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS); ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); ieee80211_check_fast_rx_iface(sdata); return 0; } static void ieee80211_set_cqm_rssi_link(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, s32 rssi_thold, u32 rssi_hyst, s32 rssi_low, s32 rssi_high) { struct ieee80211_bss_conf *conf; if (!link || !link->conf) return; conf = link->conf; if (rssi_thold && rssi_hyst && rssi_thold == conf->cqm_rssi_thold && rssi_hyst == conf->cqm_rssi_hyst) return; conf->cqm_rssi_thold = rssi_thold; conf->cqm_rssi_hyst = rssi_hyst; conf->cqm_rssi_low = rssi_low; conf->cqm_rssi_high = rssi_high; link->u.mgd.last_cqm_event_signal = 0; if 
(!ieee80211_vif_link_active(&sdata->vif, link->link_id)) return; if (sdata->u.mgd.associated && (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_CQM); } static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_vif *vif = &sdata->vif; int link_id; if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER && !(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) return -EOPNOTSUPP; /* For MLD, handle CQM change on all the active links */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_link_data *link = sdata_dereference(sdata->link[link_id], sdata); ieee80211_set_cqm_rssi_link(sdata, link, rssi_thold, rssi_hyst, 0, 0); } return 0; } static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_low, s32 rssi_high) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_vif *vif = &sdata->vif; int link_id; if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) return -EOPNOTSUPP; /* For MLD, handle CQM change on all the active links */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_link_data *link = sdata_dereference(sdata->link[link_id], sdata); ieee80211_set_cqm_rssi_link(sdata, link, 0, 0, rssi_low, rssi_high); } return 0; } static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id, const u8 *addr, const struct cfg80211_bitrate_mask *mask) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); int i, ret; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; /* * If active validate the setting and reject it if it doesn't leave * at least one basic rate usable, since we really have to be able * to send something, and if we're an AP we have to be able to do * so at a basic rate so that all clients can receive it. 
*/ if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) && sdata->vif.bss_conf.chanreq.oper.chan) { u32 basic_rates = sdata->vif.bss_conf.basic_rates; enum nl80211_band band; band = sdata->vif.bss_conf.chanreq.oper.chan->band; if (!(mask->control[band].legacy & basic_rates)) return -EINVAL; } if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { ret = drv_set_bitrate_mask(local, sdata, mask); if (ret) return ret; } for (i = 0; i < NUM_NL80211_BANDS; i++) { struct ieee80211_supported_band *sband = wiphy->bands[i]; int j; sdata->rc_rateidx_mask[i] = mask->control[i].legacy; memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs, sizeof(mask->control[i].ht_mcs)); memcpy(sdata->rc_rateidx_vht_mcs_mask[i], mask->control[i].vht_mcs, sizeof(mask->control[i].vht_mcs)); sdata->rc_has_mcs_mask[i] = false; sdata->rc_has_vht_mcs_mask[i] = false; if (!sband) continue; for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) { sdata->rc_has_mcs_mask[i] = true; break; } } for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) { sdata->rc_has_vht_mcs_mask[i] = true; break; } } } return 0; } static bool ieee80211_is_scan_ongoing(struct wiphy *wiphy, struct ieee80211_local *local, struct cfg80211_chan_def *chandef) { struct cfg80211_scan_request *scan_req; int chan_radio_idx, req_radio_idx; struct ieee80211_roc_work *roc; if (list_empty(&local->roc_list) && !local->scanning) return false; req_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chandef->chan); if (local->scanning) { scan_req = wiphy_dereference(wiphy, local->scan_req); /* * Scan is going on but info is not there. Should not happen * but if it does, let's not take risk and assume we can't use * the hw hence return true */ if (WARN_ON_ONCE(!scan_req)) return true; return ieee80211_is_radio_idx_in_scan_req(wiphy, scan_req, req_radio_idx); } list_for_each_entry(roc, &local->roc_list, list) { chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, roc->chan); if (chan_radio_idx == req_radio_idx) return true; } return false; } static int ieee80211_start_radar_detection(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms, int link_id) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_chan_req chanreq = { .oper = *chandef }; struct ieee80211_local *local = sdata->local; struct ieee80211_link_data *link_data; int err; lockdep_assert_wiphy(local->hw.wiphy); if (ieee80211_is_scan_ongoing(wiphy, local, chandef)) return -EBUSY; link_data = sdata_dereference(sdata->link[link_id], sdata); if (!link_data) return -ENOLINK; /* whatever, but channel contexts should not complain about that one */ link_data->smps_mode = IEEE80211_SMPS_OFF; link_data->needed_rx_chains = local->rx_chains; err = ieee80211_link_use_channel(link_data, &chanreq, IEEE80211_CHANCTX_SHARED); if (err) return err; wiphy_delayed_work_queue(wiphy, &link_data->dfs_cac_timer_work, msecs_to_jiffies(cac_time_ms)); return 0; } static void ieee80211_end_cac(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_link_data *link_data; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(sdata, &local->interfaces, list) { link_data = sdata_dereference(sdata->link[link_id], sdata); if (!link_data) continue; wiphy_delayed_work_cancel(wiphy, &link_data->dfs_cac_timer_work); if 
(sdata->wdev.links[link_id].cac_started) { ieee80211_link_release_channel(link_data); sdata->wdev.links[link_id].cac_started = false; } } } static struct cfg80211_beacon_data * cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) { struct cfg80211_beacon_data *new_beacon; u8 *pos; int len; len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + beacon->proberesp_ies_len + beacon->assocresp_ies_len + beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len; if (beacon->mbssid_ies) len += ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies, beacon->rnr_ies, beacon->mbssid_ies->cnt); new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); if (!new_beacon) return NULL; if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { new_beacon->mbssid_ies = kzalloc(struct_size(new_beacon->mbssid_ies, elem, beacon->mbssid_ies->cnt), GFP_KERNEL); if (!new_beacon->mbssid_ies) { kfree(new_beacon); return NULL; } if (beacon->rnr_ies && beacon->rnr_ies->cnt) { new_beacon->rnr_ies = kzalloc(struct_size(new_beacon->rnr_ies, elem, beacon->rnr_ies->cnt), GFP_KERNEL); if (!new_beacon->rnr_ies) { kfree(new_beacon->mbssid_ies); kfree(new_beacon); return NULL; } } } pos = (u8 *)(new_beacon + 1); if (beacon->head_len) { new_beacon->head_len = beacon->head_len; new_beacon->head = pos; memcpy(pos, beacon->head, beacon->head_len); pos += beacon->head_len; } if (beacon->tail_len) { new_beacon->tail_len = beacon->tail_len; new_beacon->tail = pos; memcpy(pos, beacon->tail, beacon->tail_len); pos += beacon->tail_len; } if (beacon->beacon_ies_len) { new_beacon->beacon_ies_len = beacon->beacon_ies_len; new_beacon->beacon_ies = pos; memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len); pos += beacon->beacon_ies_len; } if (beacon->proberesp_ies_len) { new_beacon->proberesp_ies_len = beacon->proberesp_ies_len; new_beacon->proberesp_ies = pos; memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len); pos += beacon->proberesp_ies_len; } if (beacon->assocresp_ies_len) { new_beacon->assocresp_ies_len = beacon->assocresp_ies_len; new_beacon->assocresp_ies = pos; memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len); pos += beacon->assocresp_ies_len; } if (beacon->probe_resp_len) { new_beacon->probe_resp_len = beacon->probe_resp_len; new_beacon->probe_resp = pos; memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); pos += beacon->probe_resp_len; } if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { pos += ieee80211_copy_mbssid_beacon(pos, new_beacon->mbssid_ies, beacon->mbssid_ies); if (beacon->rnr_ies && beacon->rnr_ies->cnt) pos += ieee80211_copy_rnr_beacon(pos, new_beacon->rnr_ies, beacon->rnr_ies); } /* might copy -1, meaning no changes requested */ new_beacon->ftm_responder = beacon->ftm_responder; if (beacon->lci) { new_beacon->lci_len = beacon->lci_len; new_beacon->lci = pos; memcpy(pos, beacon->lci, beacon->lci_len); pos += beacon->lci_len; } if (beacon->civicloc) { new_beacon->civicloc_len = beacon->civicloc_len; new_beacon->civicloc = pos; memcpy(pos, beacon->civicloc, beacon->civicloc_len); pos += beacon->civicloc_len; } return new_beacon; } void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_bss_conf *tx_bss_conf; struct ieee80211_link_data *link_data; if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS)) return; rcu_read_lock(); link_data = rcu_dereference(sdata->link[link_id]); if (WARN_ON(!link_data)) { rcu_read_unlock(); return; } 
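	/* Note: link_data->conf->tx_bss_conf points at the transmitting BSS of
	 * an MBSSID set; if it equals our own conf below, this link is the
	 * transmitting interface and the CSA must also be finalized on the
	 * non-transmitting links that share it.
	 */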
tx_bss_conf = rcu_dereference(link_data->conf->tx_bss_conf); if (tx_bss_conf == link_data->conf) { /* Trigger ieee80211_csa_finish() on the non-transmitting * interfaces when channel switch is received on * transmitting interface */ struct ieee80211_link_data *iter; for_each_sdata_link_rcu(local, iter) { if (iter->sdata == sdata || rcu_access_pointer(iter->conf->tx_bss_conf) != tx_bss_conf) continue; wiphy_work_queue(iter->sdata->local->hw.wiphy, &iter->csa.finalize_work); } } wiphy_work_queue(local->hw.wiphy, &link_data->csa.finalize_work); rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_csa_finish); void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; sdata_info(sdata, "channel switch failed, disconnecting\n"); wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work); } EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); static int ieee80211_set_after_csa_beacon(struct ieee80211_link_data *link_data, u64 *changed) { struct ieee80211_sub_if_data *sdata = link_data->sdata; int err; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: if (!link_data->u.ap.next_beacon) return -EINVAL; err = ieee80211_assign_beacon(sdata, link_data, link_data->u.ap.next_beacon, NULL, NULL, changed); ieee80211_free_next_beacon(link_data); if (err < 0) return err; break; case NL80211_IFTYPE_ADHOC: err = ieee80211_ibss_finish_csa(sdata, changed); if (err < 0) return err; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: err = ieee80211_mesh_finish_csa(sdata, changed); if (err < 0) return err; break; #endif default: WARN_ON(1); return -EINVAL; } return 0; } static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data) { struct ieee80211_sub_if_data *sdata = link_data->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_bss_conf *link_conf = link_data->conf; u64 changed = 0; int err; lockdep_assert_wiphy(local->hw.wiphy); /* * using reservation isn't immediate as it may be deferred until later * with multi-vif. 
	 * once reservation is complete it will re-schedule the
	 * work with no reserved_chanctx so verify chandef to check if it
	 * completed successfully
	 */

	if (link_data->reserved_chanctx) {
		/*
		 * with multi-vif csa driver may call ieee80211_csa_finish()
		 * many times while waiting for other interfaces to use their
		 * reservations
		 */
		if (link_data->reserved_ready)
			return 0;

		return ieee80211_link_use_reserved_context(link_data);
	}

	if (!cfg80211_chandef_identical(&link_conf->chanreq.oper,
					&link_data->csa.chanreq.oper))
		return -EINVAL;

	link_conf->csa_active = false;

	err = ieee80211_set_after_csa_beacon(link_data, &changed);
	if (err)
		return err;

	ieee80211_link_info_change_notify(sdata, link_data, changed);

	ieee80211_vif_unblock_queues_csa(sdata);

	err = drv_post_channel_switch(link_data);
	if (err)
		return err;

	cfg80211_ch_switch_notify(sdata->dev, &link_data->csa.chanreq.oper,
				  link_data->link_id);

	return 0;
}

static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
{
	struct ieee80211_sub_if_data *sdata = link_data->sdata;

	if (__ieee80211_csa_finalize(link_data)) {
		sdata_info(sdata, "failed to finalize CSA on link %d, disconnecting\n",
			   link_data->link_id);
		cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
				    GFP_KERNEL);
	}
}

void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct ieee80211_link_data *link =
		container_of(work, struct ieee80211_link_data,
			     csa.finalize_work);
	struct ieee80211_sub_if_data *sdata = link->sdata;
	struct ieee80211_local *local = sdata->local;

	lockdep_assert_wiphy(local->hw.wiphy);

	/* AP might have been stopped while waiting for the lock. */
	if (!link->conf->csa_active)
		return;

	if (!ieee80211_sdata_running(sdata))
		return;

	ieee80211_csa_finalize(link);
}

static int ieee80211_set_csa_beacon(struct ieee80211_link_data *link_data,
				    struct cfg80211_csa_settings *params,
				    u64 *changed)
{
	struct ieee80211_sub_if_data *sdata = link_data->sdata;
	struct ieee80211_csa_settings csa = {};
	int err;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		link_data->u.ap.next_beacon =
			cfg80211_beacon_dup(&params->beacon_after);
		if (!link_data->u.ap.next_beacon)
			return -ENOMEM;

		/*
		 * With a count of 0, we don't have to wait for any
		 * TBTT before switching, so complete the CSA
		 * immediately. In theory, with a count == 1 we
		 * should delay the switch until just before the next
		 * TBTT, but that would complicate things so we switch
		 * immediately too. If we would delay the switch
		 * until the next TBTT, we would have to set the probe
		 * response here.
		 *
		 * TODO: A channel switch with count <= 1 without
		 * sending a CSA action frame is kind of useless,
		 * because the clients won't know we're changing
		 * channels. The action frame must be implemented
		 * either here or in the userspace.
		 */
		if (params->count <= 1)
			break;

		if ((params->n_counter_offsets_beacon >
		     IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
		    (params->n_counter_offsets_presp >
		     IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
			ieee80211_free_next_beacon(link_data);
			return -EINVAL;
		}

		csa.counter_offsets_beacon = params->counter_offsets_beacon;
		csa.counter_offsets_presp = params->counter_offsets_presp;
		csa.n_counter_offsets_beacon = params->n_counter_offsets_beacon;
		csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
		csa.count = params->count;

		err = ieee80211_assign_beacon(sdata, link_data,
					      &params->beacon_csa, &csa,
					      NULL, changed);
		if (err < 0) {
			ieee80211_free_next_beacon(link_data);
			return err;
		}

		break;
	case NL80211_IFTYPE_ADHOC:
		if (!sdata->vif.cfg.ibss_joined)
			return -EINVAL;

		if (params->chandef.width != sdata->u.ibss.chandef.width)
			return -EINVAL;

		switch (params->chandef.width) {
		case NL80211_CHAN_WIDTH_40:
			if (cfg80211_get_chandef_type(&params->chandef) !=
			    cfg80211_get_chandef_type(&sdata->u.ibss.chandef))
				return -EINVAL;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
		case NL80211_CHAN_WIDTH_20_NOHT:
		case NL80211_CHAN_WIDTH_20:
			break;
		default:
			return -EINVAL;
		}

		/* changes into another band are not supported */
		if (sdata->u.ibss.chandef.chan->band !=
		    params->chandef.chan->band)
			return -EINVAL;

		/* see comments in the NL80211_IFTYPE_AP block */
		if (params->count > 1) {
			err = ieee80211_ibss_csa_beacon(sdata, params, changed);
			if (err < 0)
				return err;
		}

		ieee80211_send_action_csa(sdata, params);

		break;
#ifdef CONFIG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT: {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		/* changes into another band are not supported */
		if (sdata->vif.bss_conf.chanreq.oper.chan->band !=
		    params->chandef.chan->band)
			return -EINVAL;

		if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_NONE) {
			ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_INIT;
			if (!ifmsh->pre_value)
				ifmsh->pre_value = 1;
			else
				ifmsh->pre_value++;
		}

		/* see comments in the NL80211_IFTYPE_AP block */
		if (params->count > 1) {
			err = ieee80211_mesh_csa_beacon(sdata, params, changed);
			if (err < 0) {
				ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
				return err;
			}
		}

		if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT)
			ieee80211_send_action_csa(sdata, params);

		break;
	}
#endif
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void ieee80211_color_change_abort(struct ieee80211_link_data *link)
{
	link->conf->color_change_active = false;

	ieee80211_free_next_beacon(link);

	cfg80211_color_change_aborted_notify(link->sdata->dev, link->link_id);
}

static int
__ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
			   struct cfg80211_csa_settings *params)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_chan_req chanreq = { .oper = params->chandef };
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_channel_switch ch_switch = {
		.link_id = params->link_id,
	};
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *chanctx;
	struct ieee80211_bss_conf *link_conf;
	struct ieee80211_link_data *link_data;
	u64 changed = 0;
	u8 link_id = params->link_id;
	int err;

	lockdep_assert_wiphy(local->hw.wiphy);

	if (ieee80211_is_scan_ongoing(wiphy, local, &params->chandef))
		return -EBUSY;

	if (sdata->wdev.links[link_id].cac_started)
		return -EBUSY;

	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
		return -EINVAL;

	link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
	if (!link_data)
		return -ENOLINK;

	link_conf = link_data->conf;

	if (chanreq.oper.punctured && !link_conf->eht_support)
		return -EINVAL;

	/* don't allow another channel switch if one is already active. */
	if (link_conf->csa_active)
		return -EBUSY;

	conf = wiphy_dereference(wiphy, link_conf->chanctx_conf);
	if (!conf) {
		err = -EBUSY;
		goto out;
	}

	if (params->chandef.chan->freq_offset) {
		/* this may work, but is untested */
		err = -EOPNOTSUPP;
		goto out;
	}

	err = ieee80211_set_unsol_bcast_probe_resp(sdata,
						   &params->unsol_bcast_probe_resp,
						   link_data, link_conf,
						   &changed);
	if (err)
		goto out;

	chanctx = container_of(conf, struct ieee80211_chanctx, conf);

	ch_switch.timestamp = 0;
	ch_switch.device_timestamp = 0;
	ch_switch.block_tx = params->block_tx;
	ch_switch.chandef = chanreq.oper;
	ch_switch.count = params->count;

	err = drv_pre_channel_switch(sdata, &ch_switch);
	if (err)
		goto out;

	err = ieee80211_link_reserve_chanctx(link_data, &chanreq,
					     chanctx->mode,
					     params->radar_required);
	if (err)
		goto out;

	/* if reservation is invalid then this will fail */
	err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0, -1);
	if (err) {
		ieee80211_link_unreserve_chanctx(link_data);
		goto out;
	}

	/* if there is a color change in progress, abort it */
	if (link_conf->color_change_active)
		ieee80211_color_change_abort(link_data);

	err = ieee80211_set_csa_beacon(link_data, params, &changed);
	if (err) {
		ieee80211_link_unreserve_chanctx(link_data);
		goto out;
	}

	link_data->csa.chanreq = chanreq;
	link_conf->csa_active = true;

	if (params->block_tx)
		ieee80211_vif_block_queues_csa(sdata);

	cfg80211_ch_switch_started_notify(sdata->dev,
					  &link_data->csa.chanreq.oper,
					  link_id, params->count,
					  params->block_tx);

	if (changed) {
		ieee80211_link_info_change_notify(sdata, link_data, changed);
		drv_channel_switch_beacon(sdata, &link_data->csa.chanreq.oper);
	} else {
		/* if the beacon didn't change, we can finalize immediately */
		ieee80211_csa_finalize(link_data);
	}

out:
	return err;
}

int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
			     struct cfg80211_csa_settings *params)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;

	lockdep_assert_wiphy(local->hw.wiphy);

	return __ieee80211_channel_switch(wiphy, dev, params);
}

u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local)
{
	lockdep_assert_wiphy(local->hw.wiphy);

	local->roc_cookie_counter++;

	/* wow, you wrapped 64 bits ...
more likely a bug */ if (WARN_ON(local->roc_cookie_counter == 0)) local->roc_cookie_counter++; return local->roc_cookie_counter; } int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u64 *cookie, gfp_t gfp) { unsigned long spin_flags; struct sk_buff *ack_skb; int id; ack_skb = skb_copy(skb, gfp); if (!ack_skb) return -ENOMEM; spin_lock_irqsave(&local->ack_status_lock, spin_flags); id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x2000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, spin_flags); if (id < 0) { kfree_skb(ack_skb); return -ENOMEM; } IEEE80211_SKB_CB(skb)->status_data_idr = 1; IEEE80211_SKB_CB(skb)->status_data = id; *cookie = ieee80211_mgmt_tx_cookie(local); IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie; return 0; } static void ieee80211_update_mgmt_frame_registrations(struct wiphy *wiphy, struct wireless_dev *wdev, struct mgmt_frame_regs *upd) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); u32 preq_mask = BIT(IEEE80211_STYPE_PROBE_REQ >> 4); u32 action_mask = BIT(IEEE80211_STYPE_ACTION >> 4); bool global_change, intf_change; global_change = (local->probe_req_reg != !!(upd->global_stypes & preq_mask)) || (local->rx_mcast_action_reg != !!(upd->global_mcast_stypes & action_mask)); local->probe_req_reg = upd->global_stypes & preq_mask; local->rx_mcast_action_reg = upd->global_mcast_stypes & action_mask; intf_change = (sdata->vif.probe_req_reg != !!(upd->interface_stypes & preq_mask)) || (sdata->vif.rx_mcast_action_reg != !!(upd->interface_mcast_stypes & action_mask)); sdata->vif.probe_req_reg = upd->interface_stypes & preq_mask; sdata->vif.rx_mcast_action_reg = upd->interface_mcast_stypes & action_mask; if (!local->open_count) return; if (intf_change && ieee80211_sdata_running(sdata)) drv_config_iface_filter(local, sdata, sdata->vif.probe_req_reg ? 
FIF_PROBE_REQ : 0, FIF_PROBE_REQ); if (global_change) ieee80211_configure_filter(local); } static int ieee80211_set_antenna(struct wiphy *wiphy, int radio_idx, u32 tx_ant, u32 rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); int ret; if (local->started) return -EOPNOTSUPP; ret = drv_set_antenna(local, tx_ant, rx_ant); if (ret) return ret; local->rx_chains = hweight8(rx_ant); return 0; } static int ieee80211_get_antenna(struct wiphy *wiphy, int radio_idx, u32 *tx_ant, u32 *rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); return drv_get_antenna(local, radio_idx, tx_ant, rx_ant); } static int ieee80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_gtk_rekey_data *data) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!local->ops->set_rekey_data) return -EOPNOTSUPP; drv_set_rekey_data(local, sdata, data); return 0; } static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_qos_hdr *nullfunc; struct sk_buff *skb; int size = sizeof(*nullfunc); __le16 fc; bool qos; struct ieee80211_tx_info *info; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; int ret; /* the lock is needed to assign the cookie later */ lockdep_assert_wiphy(local->hw.wiphy); rcu_read_lock(); sta = sta_info_get_bss(sdata, peer); if (!sta) { ret = -ENOLINK; goto unlock; } qos = sta->sta.wme; chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON(!chanctx_conf)) { ret = -EINVAL; goto unlock; } band = chanctx_conf->def.chan->band; if (qos) { fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | IEEE80211_FCTL_FROMDS); } else { size -= 2; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_FROMDS); } skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); if (!skb) { ret = -ENOMEM; goto unlock; } skb->dev = dev; skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put(skb, size); nullfunc->frame_control = fc; nullfunc->duration_id = 0; memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); nullfunc->seq_ctrl = 0; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_NL80211_FRAME_TX; info->band = band; skb_set_queue_mapping(skb, IEEE80211_AC_VO); skb->priority = 7; if (qos) nullfunc->qos_ctrl = cpu_to_le16(7); ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_ATOMIC); if (ret) { kfree_skb(skb); goto unlock; } local_bh_disable(); ieee80211_xmit(sdata, sta, skb); local_bh_enable(); ret = 0; unlock: rcu_read_unlock(); return ret; } static int ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_link_data *link; int ret = -ENODATA; rcu_read_lock(); link = rcu_dereference(sdata->link[link_id]); if (!link) { ret = -ENOLINK; goto out; } chanctx_conf = rcu_dereference(link->conf->chanctx_conf); if (chanctx_conf) { *chandef = link->conf->chanreq.oper; ret = 0; } else if (local->open_count > 0 && local->open_count 
== local->virt_monitors && sdata->vif.type == NL80211_IFTYPE_MONITOR) { *chandef = local->monitor_chanreq.oper; ret = 0; } out: rcu_read_unlock(); return ret; } #ifdef CONFIG_PM static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled) { drv_set_wakeup(wiphy_priv(wiphy), enabled); } #endif static int ieee80211_set_qos_map(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_qos_map *qos_map) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct mac80211_qos_map *new_qos_map, *old_qos_map; if (qos_map) { new_qos_map = kzalloc(sizeof(*new_qos_map), GFP_KERNEL); if (!new_qos_map) return -ENOMEM; memcpy(&new_qos_map->qos_map, qos_map, sizeof(*qos_map)); } else { /* A NULL qos_map was passed to disable QoS mapping */ new_qos_map = NULL; } old_qos_map = sdata_dereference(sdata->qos_map, sdata); rcu_assign_pointer(sdata->qos_map, new_qos_map); if (old_qos_map) kfree_rcu(old_qos_map, rcu_head); return 0; } static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link; struct ieee80211_chan_req chanreq = { .oper = *chandef }; int ret; u64 changed = 0; link = sdata_dereference(sdata->link[link_id], sdata); ret = ieee80211_link_change_chanreq(link, &chanreq, &changed); if (ret == 0) ieee80211_link_info_change_notify(sdata, link, changed); return ret; } static int ieee80211_add_tx_ts(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer, u8 up, u16 admitted_time) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ac = ieee802_1d_to_ac[up]; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!(sdata->wmm_acm & BIT(up))) return -EINVAL; if (ifmgd->tx_tspec[ac].admitted_time) return -EBUSY; if (admitted_time) { ifmgd->tx_tspec[ac].admitted_time = 32 * admitted_time; ifmgd->tx_tspec[ac].tsid = tsid; ifmgd->tx_tspec[ac].up = up; } return 0; } static int ieee80211_del_tx_ts(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = wiphy_priv(wiphy); int ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; /* skip unused entries */ if (!tx_tspec->admitted_time) continue; if (tx_tspec->tsid != tsid) continue; /* due to this new packets will be reassigned to non-ACM ACs */ tx_tspec->up = -1; /* Make sure that all packets have been sent to avoid to * restore the QoS params on packets that are still on the * queues. 
*/ synchronize_net(); ieee80211_flush_queues(local, sdata, false); /* restore the normal QoS parameters * (unconditionally to avoid races) */ tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; tx_tspec->downgraded = false; ieee80211_sta_handle_tspec_ac_params(sdata); /* finally clear all the data */ memset(tx_tspec, 0, sizeof(*tx_tspec)); return 0; } return -ENOENT; } void ieee80211_nan_func_terminated(struct ieee80211_vif *vif, u8 inst_id, enum nl80211_nan_func_term_reason reason, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct cfg80211_nan_func *func; u64 cookie; if (WARN_ON(vif->type != NL80211_IFTYPE_NAN)) return; spin_lock_bh(&sdata->u.nan.func_lock); func = idr_find(&sdata->u.nan.function_inst_ids, inst_id); if (WARN_ON(!func)) { spin_unlock_bh(&sdata->u.nan.func_lock); return; } cookie = func->cookie; idr_remove(&sdata->u.nan.function_inst_ids, inst_id); spin_unlock_bh(&sdata->u.nan.func_lock); cfg80211_free_nan_func(func); cfg80211_nan_func_terminated(ieee80211_vif_to_wdev(vif), inst_id, reason, cookie, gfp); } EXPORT_SYMBOL(ieee80211_nan_func_terminated); void ieee80211_nan_func_match(struct ieee80211_vif *vif, struct cfg80211_nan_match_params *match, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct cfg80211_nan_func *func; if (WARN_ON(vif->type != NL80211_IFTYPE_NAN)) return; spin_lock_bh(&sdata->u.nan.func_lock); func = idr_find(&sdata->u.nan.function_inst_ids, match->inst_id); if (WARN_ON(!func)) { spin_unlock_bh(&sdata->u.nan.func_lock); return; } match->cookie = func->cookie; spin_unlock_bh(&sdata->u.nan.func_lock); cfg80211_nan_match(ieee80211_vif_to_wdev(vif), match, gfp); } EXPORT_SYMBOL(ieee80211_nan_func_match); static int ieee80211_set_multicast_to_unicast(struct wiphy *wiphy, struct net_device *dev, const bool enabled) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); sdata->u.ap.multicast_to_unicast = enabled; return 0; } void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, struct txq_info *txqi) { if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_BACKLOG_BYTES))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_BYTES); txqstats->backlog_bytes = txqi->tin.backlog_bytes; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS); txqstats->backlog_packets = txqi->tin.backlog_packets; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_FLOWS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_FLOWS); txqstats->flows = txqi->tin.flows; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_DROPS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_DROPS); txqstats->drops = txqi->cstats.drop_count; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_ECN_MARKS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_ECN_MARKS); txqstats->ecn_marks = txqi->cstats.ecn_mark; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_OVERLIMIT))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_OVERLIMIT); txqstats->overlimit = txqi->tin.overlimit; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_COLLISIONS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_COLLISIONS); txqstats->collisions = txqi->tin.collisions; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_TX_BYTES))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_TX_BYTES); txqstats->tx_bytes = txqi->tin.tx_bytes; } if (!(txqstats->filled & BIT(NL80211_TXQ_STATS_TX_PACKETS))) { txqstats->filled |= BIT(NL80211_TXQ_STATS_TX_PACKETS); txqstats->tx_packets = txqi->tin.tx_packets; } } static int 
ieee80211_get_txq_stats(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; int ret = 0; spin_lock_bh(&local->fq.lock); if (wdev) { sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (!sdata->vif.txq) { ret = 1; goto out; } ieee80211_fill_txq_stats(txqstats, to_txq_info(sdata->vif.txq)); } else { /* phy stats */ txqstats->filled |= BIT(NL80211_TXQ_STATS_BACKLOG_PACKETS) | BIT(NL80211_TXQ_STATS_BACKLOG_BYTES) | BIT(NL80211_TXQ_STATS_OVERLIMIT) | BIT(NL80211_TXQ_STATS_OVERMEMORY) | BIT(NL80211_TXQ_STATS_COLLISIONS) | BIT(NL80211_TXQ_STATS_MAX_FLOWS); txqstats->backlog_packets = local->fq.backlog; txqstats->backlog_bytes = local->fq.memory_usage; txqstats->overlimit = local->fq.overlimit; txqstats->overmemory = local->fq.overmemory; txqstats->collisions = local->fq.collisions; txqstats->max_flows = local->fq.flows_cnt; } out: spin_unlock_bh(&local->fq.lock); return ret; } static int ieee80211_get_ftm_responder_stats(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); return drv_get_ftm_responder_stats(local, sdata, ftm_stats); } static int ieee80211_start_pmsr(struct wiphy *wiphy, struct wireless_dev *dev, struct cfg80211_pmsr_request *request) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev); return drv_start_pmsr(local, sdata, request); } static void ieee80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *dev, struct cfg80211_pmsr_request *request) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev); return drv_abort_pmsr(local, sdata, request); } static int ieee80211_set_tid_config(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_tid_config *tid_conf) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (!sdata->local->ops->set_tid_config) return -EOPNOTSUPP; if (!tid_conf->peer) return drv_set_tid_config(sdata->local, sdata, NULL, tid_conf); sta = sta_info_get_bss(sdata, tid_conf->peer); if (!sta) return -ENOENT; return drv_set_tid_config(sdata->local, sdata, &sta->sta, tid_conf); } static int ieee80211_reset_tid_config(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 tids) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (!sdata->local->ops->reset_tid_config) return -EOPNOTSUPP; if (!peer) return drv_reset_tid_config(sdata->local, sdata, NULL, tids); sta = sta_info_get_bss(sdata, peer); if (!sta) return -ENOENT; return drv_reset_tid_config(sdata->local, sdata, &sta->sta, tids); } static int ieee80211_set_sar_specs(struct wiphy *wiphy, struct cfg80211_sar_specs *sar) { struct ieee80211_local *local = wiphy_priv(wiphy); if (!local->ops->set_sar_specs) return -EOPNOTSUPP; return local->ops->set_sar_specs(&local->hw, sar); } static int ieee80211_set_after_color_change_beacon(struct ieee80211_link_data *link, u64 *changed) { struct ieee80211_sub_if_data *sdata = link->sdata; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: { int ret; if (!link->u.ap.next_beacon) return -EINVAL; ret = ieee80211_assign_beacon(sdata, link, link->u.ap.next_beacon, NULL, NULL, changed); 
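		/* next_beacon was only staged for the color change; release it
		 * whether or not the assignment above succeeded
		 */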
		ieee80211_free_next_beacon(link);
		if (ret < 0)
			return ret;
		break;
	}
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return 0;
}

static int
ieee80211_set_color_change_beacon(struct ieee80211_link_data *link,
				  struct cfg80211_color_change_settings *params,
				  u64 *changed)
{
	struct ieee80211_sub_if_data *sdata = link->sdata;
	struct ieee80211_color_change_settings color_change = {};
	int err;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		link->u.ap.next_beacon =
			cfg80211_beacon_dup(&params->beacon_next);
		if (!link->u.ap.next_beacon)
			return -ENOMEM;

		if (params->count <= 1)
			break;

		color_change.counter_offset_beacon =
			params->counter_offset_beacon;
		color_change.counter_offset_presp =
			params->counter_offset_presp;
		color_change.count = params->count;

		err = ieee80211_assign_beacon(sdata, link,
					      &params->beacon_color_change,
					      NULL, &color_change, changed);
		if (err < 0) {
			ieee80211_free_next_beacon(link);
			return err;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void
ieee80211_color_change_bss_config_notify(struct ieee80211_link_data *link,
					 u8 color, int enable, u64 changed)
{
	struct ieee80211_sub_if_data *sdata = link->sdata;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	link->conf->he_bss_color.color = color;
	link->conf->he_bss_color.enabled = enable;
	changed |= BSS_CHANGED_HE_BSS_COLOR;

	ieee80211_link_info_change_notify(sdata, link, changed);

	if (!link->conf->nontransmitted &&
	    rcu_access_pointer(link->conf->tx_bss_conf)) {
		struct ieee80211_link_data *tmp;

		for_each_sdata_link(sdata->local, tmp) {
			if (tmp->sdata == sdata ||
			    rcu_access_pointer(tmp->conf->tx_bss_conf) != link->conf)
				continue;

			tmp->conf->he_bss_color.color = color;
			tmp->conf->he_bss_color.enabled = enable;
			ieee80211_link_info_change_notify(tmp->sdata, tmp,
							  BSS_CHANGED_HE_BSS_COLOR);
		}
	}
}

static int ieee80211_color_change_finalize(struct ieee80211_link_data *link)
{
	struct ieee80211_sub_if_data *sdata = link->sdata;
	struct ieee80211_local *local = sdata->local;
	u64 changed = 0;
	int err;

	lockdep_assert_wiphy(local->hw.wiphy);

	link->conf->color_change_active = false;

	err = ieee80211_set_after_color_change_beacon(link, &changed);
	if (err) {
		cfg80211_color_change_aborted_notify(sdata->dev, link->link_id);
		return err;
	}

	ieee80211_color_change_bss_config_notify(link,
						 link->conf->color_change_color,
						 1, changed);
	cfg80211_color_change_notify(sdata->dev, link->link_id);

	return 0;
}

void ieee80211_color_change_finalize_work(struct wiphy *wiphy,
					  struct wiphy_work *work)
{
	struct ieee80211_link_data *link =
		container_of(work, struct ieee80211_link_data,
			     color_change_finalize_work);
	struct ieee80211_sub_if_data *sdata = link->sdata;
	struct ieee80211_bss_conf *link_conf = link->conf;
	struct ieee80211_local *local = sdata->local;

	lockdep_assert_wiphy(local->hw.wiphy);

	/* AP might have been stopped while waiting for the lock. */
	if (!link_conf->color_change_active)
		return;

	if (!ieee80211_sdata_running(sdata))
		return;

	ieee80211_color_change_finalize(link);
}

void ieee80211_color_collision_detection_work(struct wiphy *wiphy,
					      struct wiphy_work *work)
{
	struct ieee80211_link_data *link =
		container_of(work, struct ieee80211_link_data,
			     color_collision_detect_work.work);
	struct ieee80211_sub_if_data *sdata = link->sdata;

	cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap,
					     link->link_id);
}

void ieee80211_color_change_finish(struct ieee80211_vif *vif, u8 link_id)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_link_data *link;

	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
		return;

	rcu_read_lock();

	link = rcu_dereference(sdata->link[link_id]);
	if (WARN_ON(!link)) {
		rcu_read_unlock();
		return;
	}

	wiphy_work_queue(sdata->local->hw.wiphy,
			 &link->color_change_finalize_work);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);

void ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
					   u64 color_bitmap, u8 link_id)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_link_data *link;

	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
		return;

	rcu_read_lock();

	link = rcu_dereference(sdata->link[link_id]);
	if (WARN_ON(!link)) {
		rcu_read_unlock();
		return;
	}

	if (link->conf->color_change_active || link->conf->csa_active) {
		rcu_read_unlock();
		return;
	}

	if (wiphy_delayed_work_pending(sdata->local->hw.wiphy,
				       &link->color_collision_detect_work)) {
		rcu_read_unlock();
		return;
	}

	link->color_bitmap = color_bitmap;
	/* queue the color collision detection event every 500 ms in order to
	 * avoid sending too many netlink messages to userspace.
	 */
	wiphy_delayed_work_queue(sdata->local->hw.wiphy,
				 &link->color_collision_detect_work,
				 msecs_to_jiffies(500));

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_obss_color_collision_notify);

static int
ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
		       struct cfg80211_color_change_settings *params)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_bss_conf *link_conf;
	struct ieee80211_link_data *link;
	u8 link_id = params->link_id;
	u64 changed = 0;
	int err;

	lockdep_assert_wiphy(local->hw.wiphy);

	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
		return -EINVAL;

	link = wiphy_dereference(wiphy, sdata->link[link_id]);
	if (!link)
		return -ENOLINK;

	link_conf = link->conf;

	if (link_conf->nontransmitted)
		return -EINVAL;

	/* don't allow another color change if one is already active or if csa
	 * is active
	 */
	if (link_conf->color_change_active || link_conf->csa_active) {
		err = -EBUSY;
		goto out;
	}

	err = ieee80211_set_unsol_bcast_probe_resp(sdata,
						   &params->unsol_bcast_probe_resp,
						   link, link_conf, &changed);
	if (err)
		goto out;

	err = ieee80211_set_color_change_beacon(link, params, &changed);
	if (err)
		goto out;

	link_conf->color_change_active = true;
	link_conf->color_change_color = params->color;

	cfg80211_color_change_started_notify(sdata->dev, params->count, link_id);

	if (changed)
		ieee80211_color_change_bss_config_notify(link, 0, 0, changed);
	else
		/* if the beacon didn't change, we can finalize immediately */
		ieee80211_color_change_finalize(link);

out:
	return err;
}

static int
ieee80211_set_radar_background(struct wiphy *wiphy,
			       struct cfg80211_chan_def *chandef)
{
	struct ieee80211_local *local = wiphy_priv(wiphy);

	if (!local->ops->set_radar_background)
		return -EOPNOTSUPP;

	return local->ops->set_radar_background(&local->hw,
chandef); } static int ieee80211_add_intf_link(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); lockdep_assert_wiphy(sdata->local->hw.wiphy); if (wdev->use_4addr) return -EOPNOTSUPP; return ieee80211_vif_set_links(sdata, wdev->valid_links, 0); } static void ieee80211_del_intf_link(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); u16 new_links = wdev->valid_links & ~BIT(link_id); lockdep_assert_wiphy(sdata->local->hw.wiphy); /* During the link teardown process, certain functions require the * link_id to remain in the valid_links bitmap. Therefore, instead * of removing the link_id from the bitmap, pass a masked value to * simulate as if link_id does not exist anymore. */ ieee80211_vif_set_links(sdata, new_links, 0); } static int ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev, struct link_station_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; int ret; lockdep_assert_wiphy(local->hw.wiphy); sta = sta_info_get_bss(sdata, params->mld_mac); if (!sta) return -ENOENT; if (!sta->sta.valid_links) return -EINVAL; if (sta->sta.valid_links & BIT(params->link_id)) return -EALREADY; ret = ieee80211_sta_allocate_link(sta, params->link_id); if (ret) return ret; ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_NEW, params); if (ret) { ieee80211_sta_free_link(sta, params->link_id); return ret; } if (test_sta_flag(sta, WLAN_STA_ASSOC)) { struct link_sta_info *link_sta; link_sta = sdata_dereference(sta->link[params->link_id], sdata); rate_control_rate_init(link_sta); } /* ieee80211_sta_activate_link frees the link upon failure */ return ieee80211_sta_activate_link(sta, params->link_id); } static int ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev, struct link_station_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; lockdep_assert_wiphy(local->hw.wiphy); sta = sta_info_get_bss(sdata, params->mld_mac); if (!sta) return -ENOENT; if (!(sta->sta.valid_links & BIT(params->link_id))) return -EINVAL; return sta_link_apply_parameters(local, sta, STA_LINK_MODE_LINK_MODIFY, params); } static int ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev, struct link_station_del_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); sta = sta_info_get_bss(sdata, params->mld_mac); if (!sta) return -ENOENT; if (!(sta->sta.valid_links & BIT(params->link_id))) return -EINVAL; /* must not create a STA without links */ if (sta->sta.valid_links == BIT(params->link_id)) return -EINVAL; ieee80211_sta_remove_link(sta, params->link_id); return 0; } static int ieee80211_set_hw_timestamp(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_set_hw_timestamp *hwts) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; if (!local->ops->set_hw_timestamp) return -EOPNOTSUPP; if (!check_sdata_in_driver(sdata)) return -EIO; return local->ops->set_hw_timestamp(&local->hw, &sdata->vif, hwts); } static int ieee80211_set_ttlm(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ttlm_params 
*params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); lockdep_assert_wiphy(sdata->local->hw.wiphy); return ieee80211_req_neg_ttlm(sdata, params); } static int ieee80211_assoc_ml_reconf(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ml_reconf_req *req) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); lockdep_assert_wiphy(sdata->local->hw.wiphy); return ieee80211_mgd_assoc_ml_reconf(sdata, req); } static int ieee80211_set_epcs(struct wiphy *wiphy, struct net_device *dev, bool enable) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); return ieee80211_mgd_set_epcs(sdata, enable); } const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, .change_virtual_intf = ieee80211_change_iface, .start_p2p_device = ieee80211_start_p2p_device, .stop_p2p_device = ieee80211_stop_p2p_device, .add_key = ieee80211_add_key, .del_key = ieee80211_del_key, .get_key = ieee80211_get_key, .set_default_key = ieee80211_config_default_key, .set_default_mgmt_key = ieee80211_config_default_mgmt_key, .set_default_beacon_key = ieee80211_config_default_beacon_key, .start_ap = ieee80211_start_ap, .change_beacon = ieee80211_change_beacon, .stop_ap = ieee80211_stop_ap, .add_station = ieee80211_add_station, .del_station = ieee80211_del_station, .change_station = ieee80211_change_station, .get_station = ieee80211_get_station, .dump_station = ieee80211_dump_station, .dump_survey = ieee80211_dump_survey, #ifdef CONFIG_MAC80211_MESH .add_mpath = ieee80211_add_mpath, .del_mpath = ieee80211_del_mpath, .change_mpath = ieee80211_change_mpath, .get_mpath = ieee80211_get_mpath, .dump_mpath = ieee80211_dump_mpath, .get_mpp = ieee80211_get_mpp, .dump_mpp = ieee80211_dump_mpp, .update_mesh_config = ieee80211_update_mesh_config, .get_mesh_config = ieee80211_get_mesh_config, .join_mesh = ieee80211_join_mesh, .leave_mesh = ieee80211_leave_mesh, #endif .join_ocb = ieee80211_join_ocb, .leave_ocb = ieee80211_leave_ocb, .change_bss = ieee80211_change_bss, .inform_bss = ieee80211_inform_bss, .set_txq_params = ieee80211_set_txq_params, .set_monitor_channel = ieee80211_set_monitor_channel, .suspend = ieee80211_suspend, .resume = ieee80211_resume, .scan = ieee80211_scan, .abort_scan = ieee80211_abort_scan, .sched_scan_start = ieee80211_sched_scan_start, .sched_scan_stop = ieee80211_sched_scan_stop, .auth = ieee80211_auth, .assoc = ieee80211_assoc, .deauth = ieee80211_deauth, .disassoc = ieee80211_disassoc, .join_ibss = ieee80211_join_ibss, .leave_ibss = ieee80211_leave_ibss, .set_mcast_rate = ieee80211_set_mcast_rate, .set_wiphy_params = ieee80211_set_wiphy_params, .set_tx_power = ieee80211_set_tx_power, .get_tx_power = ieee80211_get_tx_power, .rfkill_poll = ieee80211_rfkill_poll, CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump) .set_power_mgmt = ieee80211_set_power_mgmt, .set_bitrate_mask = ieee80211_set_bitrate_mask, .remain_on_channel = ieee80211_remain_on_channel, .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, .mgmt_tx = ieee80211_mgmt_tx, .mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait, .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, .set_cqm_rssi_range_config = ieee80211_set_cqm_rssi_range_config, .update_mgmt_frame_registrations = ieee80211_update_mgmt_frame_registrations, .set_antenna = ieee80211_set_antenna, .get_antenna = ieee80211_get_antenna, .set_rekey_data = ieee80211_set_rekey_data, .tdls_oper = 
ieee80211_tdls_oper, .tdls_mgmt = ieee80211_tdls_mgmt, .tdls_channel_switch = ieee80211_tdls_channel_switch, .tdls_cancel_channel_switch = ieee80211_tdls_cancel_channel_switch, .probe_client = ieee80211_probe_client, .set_noack_map = ieee80211_set_noack_map, #ifdef CONFIG_PM .set_wakeup = ieee80211_set_wakeup, #endif .get_channel = ieee80211_cfg_get_channel, .start_radar_detection = ieee80211_start_radar_detection, .end_cac = ieee80211_end_cac, .channel_switch = ieee80211_channel_switch, .set_qos_map = ieee80211_set_qos_map, .set_ap_chanwidth = ieee80211_set_ap_chanwidth, .add_tx_ts = ieee80211_add_tx_ts, .del_tx_ts = ieee80211_del_tx_ts, .start_nan = ieee80211_start_nan, .stop_nan = ieee80211_stop_nan, .nan_change_conf = ieee80211_nan_change_conf, .add_nan_func = ieee80211_add_nan_func, .del_nan_func = ieee80211_del_nan_func, .set_multicast_to_unicast = ieee80211_set_multicast_to_unicast, .tx_control_port = ieee80211_tx_control_port, .get_txq_stats = ieee80211_get_txq_stats, .get_ftm_responder_stats = ieee80211_get_ftm_responder_stats, .start_pmsr = ieee80211_start_pmsr, .abort_pmsr = ieee80211_abort_pmsr, .probe_mesh_link = ieee80211_probe_mesh_link, .set_tid_config = ieee80211_set_tid_config, .reset_tid_config = ieee80211_reset_tid_config, .set_sar_specs = ieee80211_set_sar_specs, .color_change = ieee80211_color_change, .set_radar_background = ieee80211_set_radar_background, .add_intf_link = ieee80211_add_intf_link, .del_intf_link = ieee80211_del_intf_link, .add_link_station = ieee80211_add_link_station, .mod_link_station = ieee80211_mod_link_station, .del_link_station = ieee80211_del_link_station, .set_hw_timestamp = ieee80211_set_hw_timestamp, .set_ttlm = ieee80211_set_ttlm, .get_radio_mask = ieee80211_get_radio_mask, .assoc_ml_reconf = ieee80211_assoc_ml_reconf, .set_epcs = ieee80211_set_epcs, }; |
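/*
 * Illustrative sketch, not part of the original file: most of the callbacks
 * wired up in mac80211_config_ops above share the same shape -- resolve the
 * wiphy to the mac80211 ieee80211_local with wiphy_priv(), resolve the
 * net_device (or wireless_dev) to the sub-interface, return -EOPNOTSUPP when
 * the low-level driver does not implement the hook, and otherwise forward
 * the request (directly or through a drv_*() wrapper).  The names "set_foo",
 * "ieee80211_set_foo" and "struct cfg80211_foo_params" below are
 * hypothetical and only show that pattern.
 */
static int ieee80211_set_foo(struct wiphy *wiphy, struct net_device *dev,
			     struct cfg80211_foo_params *params)
{
	struct ieee80211_local *local = wiphy_priv(wiphy);
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	if (!local->ops->set_foo)
		return -EOPNOTSUPP;

	return local->ops->set_foo(&local->hw, &sdata->vif, params);
}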
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 | // SPDX-License-Identifier: GPL-2.0-only /* * Generic pidhash and scalable, time-bounded PID allocator * * (C) 2002-2003 Nadia Yvette Chambers, IBM * (C) 2004 Nadia Yvette Chambers, Oracle * (C) 2002-2004 Ingo Molnar, Red Hat * * pid-structures are backing objects for tasks sharing a given ID to chain * against. There is very little to them aside from hashing them and * parking tasks using given ID's on a list. * * The hash is always changed with the tasklist_lock write-acquired, * and the hash is only accessed with the tasklist_lock at least * read-acquired, so there's no additional SMP locking needed here. * * We have a list of bitmap pages, which bitmaps represent the PID space. * Allocating and freeing PIDs is completely lockless. The worst-case * allocation scenario when all but one out of 1 million PIDs possible are * allocated already: the scanning of 32 list entries and at most PAGE_SIZE * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). * * Pid namespaces: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/memblock.h> #include <linux/pid_namespace.h> #include <linux/init_task.h> #include <linux/syscalls.h> #include <linux/proc_ns.h> #include <linux/refcount.h> #include <linux/anon_inodes.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/idr.h> #include <linux/pidfs.h> #include <linux/seqlock.h> #include <net/sock.h> #include <uapi/linux/pidfd.h> struct pid init_struct_pid = { .count = REFCOUNT_INIT(1), .tasks = { { .first = NULL }, { .first = NULL }, { .first = NULL }, }, .level = 0, .numbers = { { .nr = 0, .ns = &init_pid_ns, }, } }; static int pid_max_min = RESERVED_PIDS + 1; static int pid_max_max = PID_MAX_LIMIT; /* * PID-map pages start out as NULL, they get allocated upon * first use and are never deallocated. This way a low pid_max * value does not cause lots of bitmaps to be allocated, but * the scheme scales to up to 4 million PIDs, runtime. 
*/ struct pid_namespace init_pid_ns = { .ns = NS_COMMON_INIT(init_pid_ns), .idr = IDR_INIT(init_pid_ns.idr), .pid_allocated = PIDNS_ADDING, .level = 0, .child_reaper = &init_task, .user_ns = &init_user_ns, .pid_max = PID_MAX_DEFAULT, #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) .memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC, #endif }; EXPORT_SYMBOL_GPL(init_pid_ns); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock); void put_pid(struct pid *pid) { struct pid_namespace *ns; if (!pid) return; ns = pid->numbers[pid->level].ns; if (refcount_dec_and_test(&pid->count)) { pidfs_free_pid(pid); kmem_cache_free(ns->pid_cachep, pid); put_pid_ns(ns); } } EXPORT_SYMBOL_GPL(put_pid); static void delayed_put_pid(struct rcu_head *rhp) { struct pid *pid = container_of(rhp, struct pid, rcu); put_pid(pid); } void free_pid(struct pid *pid) { int i; struct pid_namespace *active_ns; lockdep_assert_not_held(&tasklist_lock); active_ns = pid->numbers[pid->level].ns; ns_ref_active_put(active_ns); spin_lock(&pidmap_lock); for (i = 0; i <= pid->level; i++) { struct upid *upid = pid->numbers + i; struct pid_namespace *ns = upid->ns; switch (--ns->pid_allocated) { case 2: case 1: /* When all that is left in the pid namespace * is the reaper wake up the reaper. The reaper * may be sleeping in zap_pid_ns_processes(). */ wake_up_process(ns->child_reaper); break; case PIDNS_ADDING: /* Handle a fork failure of the first process */ WARN_ON(ns->child_reaper); ns->pid_allocated = 0; break; } idr_remove(&ns->idr, upid->nr); } pidfs_remove_pid(pid); spin_unlock(&pidmap_lock); call_rcu(&pid->rcu, delayed_put_pid); } void free_pids(struct pid **pids) { int tmp; /* * This can batch pidmap_lock. */ for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pids[tmp]) free_pid(pids[tmp]); } struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, size_t set_tid_size) { struct pid *pid; enum pid_type type; int i, nr; struct pid_namespace *tmp; struct upid *upid; int retval = -ENOMEM; /* * set_tid_size contains the size of the set_tid array. Starting at * the most nested currently active PID namespace it tells alloc_pid() * which PID to set for a process in that most nested PID namespace * up to set_tid_size PID namespaces. It does not have to set the PID * for a process in all nested PID namespaces but set_tid_size must * never be greater than the current ns->level + 1. */ if (set_tid_size > ns->level + 1) return ERR_PTR(-EINVAL); pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); if (!pid) return ERR_PTR(retval); tmp = ns; pid->level = ns->level; for (i = ns->level; i >= 0; i--) { int tid = 0; int pid_max = READ_ONCE(tmp->pid_max); if (set_tid_size) { tid = set_tid[ns->level - i]; retval = -EINVAL; if (tid < 1 || tid >= pid_max) goto out_free; /* * Also fail if a PID != 1 is requested and * no PID 1 exists. */ if (tid != 1 && !tmp->child_reaper) goto out_free; retval = -EPERM; if (!checkpoint_restore_ns_capable(tmp->user_ns)) goto out_free; set_tid_size--; } idr_preload(GFP_KERNEL); spin_lock(&pidmap_lock); if (tid) { nr = idr_alloc(&tmp->idr, NULL, tid, tid + 1, GFP_ATOMIC); /* * If ENOSPC is returned it means that the PID is * alreay in use. Return EEXIST in that case. 
*/ if (nr == -ENOSPC) nr = -EEXIST; } else { int pid_min = 1; /* * init really needs pid 1, but after reaching the * maximum wrap back to RESERVED_PIDS */ if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS) pid_min = RESERVED_PIDS; /* * Store a null pointer so find_pid_ns does not find * a partially initialized PID (see below). */ nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, pid_max, GFP_ATOMIC); } spin_unlock(&pidmap_lock); idr_preload_end(); if (nr < 0) { retval = (nr == -ENOSPC) ? -EAGAIN : nr; goto out_free; } pid->numbers[i].nr = nr; pid->numbers[i].ns = tmp; tmp = tmp->parent; } /* * ENOMEM is not the most obvious choice especially for the case * where the child subreaper has already exited and the pid * namespace denies the creation of any new processes. But ENOMEM * is what we have exposed to userspace for a long time and it is * documented behavior for pid namespaces. So we can't easily * change it even if there were an error code better suited. */ retval = -ENOMEM; get_pid_ns(ns); refcount_set(&pid->count, 1); spin_lock_init(&pid->lock); for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); init_waitqueue_head(&pid->wait_pidfd); INIT_HLIST_HEAD(&pid->inodes); upid = pid->numbers + ns->level; idr_preload(GFP_KERNEL); spin_lock(&pidmap_lock); if (!(ns->pid_allocated & PIDNS_ADDING)) goto out_unlock; pidfs_add_pid(pid); for ( ; upid >= pid->numbers; --upid) { /* Make the PID visible to find_pid_ns. */ idr_replace(&upid->ns->idr, pid, upid->nr); upid->ns->pid_allocated++; } spin_unlock(&pidmap_lock); idr_preload_end(); ns_ref_active_get(ns); return pid; out_unlock: spin_unlock(&pidmap_lock); idr_preload_end(); put_pid_ns(ns); out_free: spin_lock(&pidmap_lock); while (++i <= ns->level) { upid = pid->numbers + i; idr_remove(&upid->ns->idr, upid->nr); } /* On failure to allocate the first pid, reset the state */ if (ns->pid_allocated == PIDNS_ADDING) idr_set_cursor(&ns->idr, 0); spin_unlock(&pidmap_lock); kmem_cache_free(ns->pid_cachep, pid); return ERR_PTR(retval); } void disable_pid_allocation(struct pid_namespace *ns) { spin_lock(&pidmap_lock); ns->pid_allocated &= ~PIDNS_ADDING; spin_unlock(&pidmap_lock); } struct pid *find_pid_ns(int nr, struct pid_namespace *ns) { return idr_find(&ns->idr, nr); } EXPORT_SYMBOL_GPL(find_pid_ns); struct pid *find_vpid(int nr) { return find_pid_ns(nr, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(find_vpid); static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type) { return (type == PIDTYPE_PID) ? &task->thread_pid : &task->signal->pids[type]; } /* * attach_pid() must be called with the tasklist_lock write-held. 
*/ void attach_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; lockdep_assert_held_write(&tasklist_lock); pid = *task_pid_ptr(task, type); hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]); } static void __change_pid(struct pid **pids, struct task_struct *task, enum pid_type type, struct pid *new) { struct pid **pid_ptr, *pid; int tmp; lockdep_assert_held_write(&tasklist_lock); pid_ptr = task_pid_ptr(task, type); pid = *pid_ptr; hlist_del_rcu(&task->pid_links[type]); *pid_ptr = new; for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pid_has_task(pid, tmp)) return; WARN_ON(pids[type]); pids[type] = pid; } void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type) { __change_pid(pids, task, type, NULL); } void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type, struct pid *pid) { __change_pid(pids, task, type, pid); attach_pid(task, type); } void exchange_tids(struct task_struct *left, struct task_struct *right) { struct pid *pid1 = left->thread_pid; struct pid *pid2 = right->thread_pid; struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID]; struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID]; lockdep_assert_held_write(&tasklist_lock); /* Swap the single entry tid lists */ hlists_swap_heads_rcu(head1, head2); /* Swap the per task_struct pid */ rcu_assign_pointer(left->thread_pid, pid2); rcu_assign_pointer(right->thread_pid, pid1); /* Swap the cached value */ WRITE_ONCE(left->pid, pid_nr(pid2)); WRITE_ONCE(right->pid, pid_nr(pid1)); } /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type type) { WARN_ON_ONCE(type == PIDTYPE_PID); lockdep_assert_held_write(&tasklist_lock); hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]); } struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; if (pid) { struct hlist_node *first; first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), lockdep_tasklist_lock_is_held()); if (first) result = hlist_entry(first, struct task_struct, pid_links[(type)]); } return result; } EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock(). 
*/ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "find_task_by_pid_ns() needs rcu_read_lock() protection"); return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } struct task_struct *find_task_by_vpid(pid_t vnr) { return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } struct task_struct *find_get_task_by_vpid(pid_t nr) { struct task_struct *task; rcu_read_lock(); task = find_task_by_vpid(nr); if (task) get_task_struct(task); rcu_read_unlock(); return task; } struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; rcu_read_lock(); pid = get_pid(rcu_dereference(*task_pid_ptr(task, type))); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(get_task_pid); struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result; rcu_read_lock(); result = pid_task(pid, type); if (result) get_task_struct(result); rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(get_pid_task); struct pid *find_get_pid(pid_t nr) { struct pid *pid; rcu_read_lock(); pid = get_pid(find_vpid(nr)); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(find_get_pid); pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; if (pid && ns && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; } return nr; } EXPORT_SYMBOL_GPL(pid_nr_ns); pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(pid_vnr); pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns) { pid_t nr = 0; rcu_read_lock(); if (!ns) ns = task_active_pid_ns(current); if (ns) nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns); rcu_read_unlock(); return nr; } EXPORT_SYMBOL(__task_pid_nr_ns); struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) { return ns_of_pid(task_pid(tsk)); } EXPORT_SYMBOL_GPL(task_active_pid_ns); /* * Used by proc to find the first pid that is greater than or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid_ns. */ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) { return idr_get_next(&ns->idr, &nr); } EXPORT_SYMBOL_GPL(find_ge_pid); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) { CLASS(fd, f)(fd); struct pid *pid; if (fd_empty(f)) return ERR_PTR(-EBADF); pid = pidfd_pid(fd_file(f)); if (!IS_ERR(pid)) { get_pid(pid); *flags = fd_file(f)->f_flags; } return pid; } /** * pidfd_get_task() - Get the task associated with a pidfd * * @pidfd: pidfd for which to get the task * @flags: flags associated with this pidfd * * Return the task associated with @pidfd. The function takes a reference on * the returned task. The caller is responsible for releasing that reference. * * Return: On success, the task_struct associated with the pidfd. * On error, a negative errno number will be returned. 
*/ struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags) { unsigned int f_flags = 0; struct pid *pid; struct task_struct *task; enum pid_type type; switch (pidfd) { case PIDFD_SELF_THREAD: type = PIDTYPE_PID; pid = get_task_pid(current, type); break; case PIDFD_SELF_THREAD_GROUP: type = PIDTYPE_TGID; pid = get_task_pid(current, type); break; default: pid = pidfd_get_pid(pidfd, &f_flags); if (IS_ERR(pid)) return ERR_CAST(pid); type = PIDTYPE_TGID; break; } task = get_pid_task(pid, type); put_pid(pid); if (!task) return ERR_PTR(-ESRCH); *flags = f_flags; return task; } /** * pidfd_create() - Create a new pid file descriptor. * * @pid: struct pid that the pidfd will reference * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set. * * Note, that this function can only be called after the fd table has * been unshared to avoid leaking the pidfd to the new process. * * This symbol should not be explicitly exported to loadable modules. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. */ static int pidfd_create(struct pid *pid, unsigned int flags) { int pidfd; struct file *pidfd_file; pidfd = pidfd_prepare(pid, flags, &pidfd_file); if (pidfd < 0) return pidfd; fd_install(pidfd, pidfd_file); return pidfd; } /** * sys_pidfd_open() - Open new pid file descriptor. * * @pid: pid for which to retrieve a pidfd * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set for * the task identified by @pid. Without PIDFD_THREAD flag the target task * must be a thread-group leader. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. */ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags) { int fd; struct pid *p; if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD)) return -EINVAL; if (pid <= 0) return -EINVAL; p = find_get_pid(pid); if (!p) return -ESRCH; fd = pidfd_create(p, flags); put_pid(p); return fd; } #ifdef CONFIG_SYSCTL static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root) { return &task_active_pid_ns(current)->set; } static int set_is_seen(struct ctl_table_set *set) { return &task_active_pid_ns(current)->set == set; } static int pid_table_root_permissions(struct ctl_table_header *head, const struct ctl_table *table) { struct pid_namespace *pidns = container_of(head->set, struct pid_namespace, set); int mode = table->mode; if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) || uid_eq(current_euid(), make_kuid(pidns->user_ns, 0))) mode = (mode & S_IRWXU) >> 6; else if (in_egroup_p(make_kgid(pidns->user_ns, 0))) mode = (mode & S_IRWXG) >> 3; else mode = mode & S_IROTH; return (mode << 6) | (mode << 3) | mode; } static void pid_table_root_set_ownership(struct ctl_table_header *head, kuid_t *uid, kgid_t *gid) { struct pid_namespace *pidns = container_of(head->set, struct pid_namespace, set); kuid_t ns_root_uid; kgid_t ns_root_gid; ns_root_uid = make_kuid(pidns->user_ns, 0); if (uid_valid(ns_root_uid)) *uid = ns_root_uid; ns_root_gid = make_kgid(pidns->user_ns, 0); if (gid_valid(ns_root_gid)) *gid = ns_root_gid; } static struct ctl_table_root pid_table_root = { .lookup = pid_table_root_lookup, .permissions = pid_table_root_permissions, .set_ownership = pid_table_root_set_ownership, }; static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp_pid; int r; struct ctl_table tmp_table = *table; 
tmp_pid = pid_vnr(cad_pid); tmp_table.data = &tmp_pid; r = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); if (r || !write) return r; new_pid = find_get_pid(tmp_pid); if (!new_pid) return -ESRCH; put_pid(xchg(&cad_pid, new_pid)); return 0; } static const struct ctl_table pid_table[] = { { .procname = "pid_max", .data = &init_pid_ns.pid_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, #ifdef CONFIG_PROC_SYSCTL { .procname = "cad_pid", .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_do_cad_pid, }, #endif }; #endif int register_pidns_sysctls(struct pid_namespace *pidns) { #ifdef CONFIG_SYSCTL struct ctl_table *tbl; setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen); tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL); if (!tbl) return -ENOMEM; tbl->data = &pidns->pid_max; pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl, ARRAY_SIZE(pid_table)); if (!pidns->sysctls) { kfree(tbl); retire_sysctl_set(&pidns->set); return -ENOMEM; } #endif return 0; } void unregister_pidns_sysctls(struct pid_namespace *pidns) { #ifdef CONFIG_SYSCTL const struct ctl_table *tbl; tbl = pidns->sysctls->ctl_table_arg; unregister_sysctl_table(pidns->sysctls); retire_sysctl_set(&pidns->set); kfree(tbl); #endif } void __init pid_idr_init(void) { /* Verify no one has done anything silly: */ BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING); /* bump default and minimum pid_max based on number of cpus */ init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pid_max_min = max_t(int, pid_max_min, PIDS_PER_CPU_MIN * num_possible_cpus()); pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min); idr_init(&init_pid_ns.idr); init_pid_ns.pid_cachep = kmem_cache_create("pid", struct_size_t(struct pid, numbers, 1), __alignof__(struct pid), SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); } static __init int pid_namespace_sysctl_init(void) { #ifdef CONFIG_SYSCTL /* "kernel" directory will have already been initialized. */ BUG_ON(register_pidns_sysctls(&init_pid_ns)); #endif return 0; } subsys_initcall(pid_namespace_sysctl_init); static struct file *__pidfd_fget(struct task_struct *task, int fd) { struct file *file; int ret; ret = down_read_killable(&task->signal->exec_update_lock); if (ret) return ERR_PTR(ret); if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS)) file = fget_task(task, fd); else file = ERR_PTR(-EPERM); up_read(&task->signal->exec_update_lock); if (!file) { /* * It is possible that the target thread is exiting; it can be * either: * 1. before exit_signals(), which gives a real fd * 2. before exit_files() takes the task_lock() gives a real fd * 3. after exit_files() releases task_lock(), ->files is NULL; * this has PF_EXITING, since it was set in exit_signals(), * __pidfd_fget() returns EBADF. * In case 3 we get EBADF, but that really means ESRCH, since * the task is currently exiting and has freed its files * struct, so we fix it up. 
*/ if (task->flags & PF_EXITING) file = ERR_PTR(-ESRCH); else file = ERR_PTR(-EBADF); } return file; } static int pidfd_getfd(struct pid *pid, int fd) { struct task_struct *task; struct file *file; int ret; task = get_pid_task(pid, PIDTYPE_PID); if (!task) return -ESRCH; file = __pidfd_fget(task, fd); put_task_struct(task); if (IS_ERR(file)) return PTR_ERR(file); ret = receive_fd(file, NULL, O_CLOEXEC); fput(file); return ret; } /** * sys_pidfd_getfd() - Get a file descriptor from another process * * @pidfd: the pidfd file descriptor of the process * @fd: the file descriptor number to get * @flags: flags on how to get the fd (reserved) * * This syscall gets a copy of a file descriptor from another process * based on the pidfd, and file descriptor number. It requires that * the calling process has the ability to ptrace the process represented * by the pidfd. The process which is having its file descriptor copied * is otherwise unaffected. * * Return: On success, a cloexec file descriptor is returned. * On error, a negative errno number will be returned. */ SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd, unsigned int, flags) { struct pid *pid; /* flags is currently unused - make sure it's unset */ if (flags) return -EINVAL; CLASS(fd, f)(pidfd); if (fd_empty(f)) return -EBADF; pid = pidfd_pid(fd_file(f)); if (IS_ERR(pid)) return PTR_ERR(pid); return pidfd_getfd(pid, fd); } |
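/*
 * Illustrative userspace sketch, not part of the original file: how the two
 * syscalls implemented above, pidfd_open() and pidfd_getfd(), are typically
 * combined -- open a pidfd for a target process, then copy one of its file
 * descriptors into the caller.  pidfd_getfd() requires ptrace-level access
 * to the target (PTRACE_MODE_ATTACH_REALCREDS, as checked in __pidfd_fget()).
 * Assumes the libc headers define SYS_pidfd_open and SYS_pidfd_getfd; error
 * handling is minimal and the helper name "grab_remote_fd" is hypothetical.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

static int grab_remote_fd(pid_t target_pid, int target_fd)
{
	int pidfd, fd;

	pidfd = syscall(SYS_pidfd_open, target_pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return -1;
	}

	fd = syscall(SYS_pidfd_getfd, pidfd, target_fd, 0);
	if (fd < 0)
		perror("pidfd_getfd");

	close(pidfd);
	return fd;	/* O_CLOEXEC copy of the target's descriptor, or -1 */
}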
| 251 457 2072 2153 1817 9 3223 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * Security-Enhanced Linux (SELinux) security module * * This file contains the SELinux security data structures for kernel objects. * * Author(s): Stephen Smalley, <stephen.smalley.work@gmail.com> * Chris Vance, <cvance@nai.com> * Wayne Salamon, <wsalamon@nai.com> * James Morris <jmorris@redhat.com> * * Copyright (C) 2001,2002 Networks Associates Technology, Inc. * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * Copyright (C) 2016 Mellanox Technologies */ #ifndef _SELINUX_OBJSEC_H_ #define _SELINUX_OBJSEC_H_ #include <linux/list.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/binfmts.h> #include <linux/in.h> #include <linux/spinlock.h> #include <linux/lsm_hooks.h> #include <linux/msg.h> #include <net/net_namespace.h> #include <linux/bpf.h> #include "flask.h" #include "avc.h" struct avdc_entry { u32 isid; /* inode SID */ u32 allowed; /* allowed permission bitmask */ u32 audited; /* audited permission bitmask */ bool permissive; /* AVC permissive flag */ }; struct cred_security_struct { u32 osid; /* SID prior to last execve */ u32 sid; /* current SID */ u32 exec_sid; /* exec SID */ u32 create_sid; /* fscreate SID */ u32 keycreate_sid; /* keycreate SID */ u32 sockcreate_sid; /* fscreate SID */ } __randomize_layout; struct task_security_struct { #define TSEC_AVDC_DIR_SIZE (1 << 2) struct { u32 sid; /* current SID for cached entries */ u32 seqno; /* AVC sequence number */ unsigned int dir_spot; /* dir cache index to check first */ struct avdc_entry dir[TSEC_AVDC_DIR_SIZE]; /* dir entries */ bool permissive_neveraudit; /* permissive and neveraudit */ } avdcache; } __randomize_layout; static inline bool task_avdcache_permnoaudit(struct task_security_struct *tsec, u32 sid) { return (tsec->avdcache.permissive_neveraudit && sid == tsec->avdcache.sid && tsec->avdcache.seqno == avc_policy_seqno()); } enum label_initialized { LABEL_INVALID, /* invalid or not initialized */ LABEL_INITIALIZED, /* initialized */ LABEL_PENDING }; struct inode_security_struct { struct inode *inode; /* back pointer to inode object */ struct list_head list; /* list of inode_security_struct */ u32 task_sid; /* SID of creating task */ u32 sid; /* SID of this object */ u16 sclass; /* security class of this object */ unsigned char initialized; /* initialization flag */ spinlock_t lock; }; struct file_security_struct { u32 sid; /* SID of open file description */ u32 fown_sid; /* SID of file owner (for SIGIO) */ u32 isid; /* 
SID of inode at the time of file open */ u32 pseqno; /* Policy seqno at the time of file open */ }; struct superblock_security_struct { u32 sid; /* SID of file system superblock */ u32 def_sid; /* default SID for labeling */ u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */ unsigned short behavior; /* labeling behavior */ unsigned short flags; /* which mount options were specified */ struct mutex lock; struct list_head isec_head; spinlock_t isec_lock; }; struct msg_security_struct { u32 sid; /* SID of message */ }; struct ipc_security_struct { u16 sclass; /* security class of this object */ u32 sid; /* SID of IPC resource */ }; struct netif_security_struct { const struct net *ns; /* network namespace */ int ifindex; /* device index */ u32 sid; /* SID for this interface */ }; struct netnode_security_struct { union { __be32 ipv4; /* IPv4 node address */ struct in6_addr ipv6; /* IPv6 node address */ } addr; u32 sid; /* SID for this node */ u16 family; /* address family */ }; struct netport_security_struct { u32 sid; /* SID for this node */ u16 port; /* port number */ u8 protocol; /* transport protocol */ }; struct sk_security_struct { #ifdef CONFIG_NETLABEL enum { /* NetLabel state */ NLBL_UNSET = 0, NLBL_REQUIRE, NLBL_LABELED, NLBL_REQSKB, NLBL_CONNLABELED, } nlbl_state; struct netlbl_lsm_secattr *nlbl_secattr; /* NetLabel sec attributes */ #endif u32 sid; /* SID of this object */ u32 peer_sid; /* SID of peer */ u16 sclass; /* sock security class */ enum { /* SCTP association state */ SCTP_ASSOC_UNSET = 0, SCTP_ASSOC_SET, } sctp_assoc_state; }; struct tun_security_struct { u32 sid; /* SID for the tun device sockets */ }; struct key_security_struct { u32 sid; /* SID of key */ }; struct ib_security_struct { u32 sid; /* SID of the queue pair or MAD agent */ }; struct pkey_security_struct { u64 subnet_prefix; /* Port subnet prefix */ u16 pkey; /* PKey number */ u32 sid; /* SID of pkey */ }; struct bpf_security_struct { u32 sid; /* SID of bpf obj creator */ }; struct perf_event_security_struct { u32 sid; /* SID of perf_event obj creator */ }; extern struct lsm_blob_sizes selinux_blob_sizes; static inline struct cred_security_struct *selinux_cred(const struct cred *cred) { return cred->security + selinux_blob_sizes.lbs_cred; } static inline struct task_security_struct * selinux_task(const struct task_struct *task) { return task->security + selinux_blob_sizes.lbs_task; } static inline struct file_security_struct *selinux_file(const struct file *file) { return file->f_security + selinux_blob_sizes.lbs_file; } static inline struct inode_security_struct * selinux_inode(const struct inode *inode) { if (unlikely(!inode->i_security)) return NULL; return inode->i_security + selinux_blob_sizes.lbs_inode; } static inline struct msg_security_struct * selinux_msg_msg(const struct msg_msg *msg_msg) { return msg_msg->security + selinux_blob_sizes.lbs_msg_msg; } static inline struct ipc_security_struct * selinux_ipc(const struct kern_ipc_perm *ipc) { return ipc->security + selinux_blob_sizes.lbs_ipc; } /* * get the subjective security ID of the current task */ static inline u32 current_sid(void) { const struct cred_security_struct *crsec = selinux_cred(current_cred()); return crsec->sid; } static inline struct superblock_security_struct * selinux_superblock(const struct super_block *superblock) { return superblock->s_security + selinux_blob_sizes.lbs_superblock; } #ifdef CONFIG_KEYS static inline struct key_security_struct *selinux_key(const struct key *key) { return key->security + 
selinux_blob_sizes.lbs_key; } #endif /* CONFIG_KEYS */ static inline struct sk_security_struct *selinux_sock(const struct sock *sock) { return sock->sk_security + selinux_blob_sizes.lbs_sock; } static inline struct tun_security_struct *selinux_tun_dev(void *security) { return security + selinux_blob_sizes.lbs_tun_dev; } static inline struct ib_security_struct *selinux_ib(void *ib_sec) { return ib_sec + selinux_blob_sizes.lbs_ib; } static inline struct perf_event_security_struct * selinux_perf_event(void *perf_event) { return perf_event + selinux_blob_sizes.lbs_perf_event; } #ifdef CONFIG_BPF_SYSCALL static inline struct bpf_security_struct * selinux_bpf_map_security(struct bpf_map *map) { return map->security + selinux_blob_sizes.lbs_bpf_map; } static inline struct bpf_security_struct * selinux_bpf_prog_security(struct bpf_prog *prog) { return prog->aux->security + selinux_blob_sizes.lbs_bpf_prog; } static inline struct bpf_security_struct * selinux_bpf_token_security(struct bpf_token *token) { return token->security + selinux_blob_sizes.lbs_bpf_token; } #endif /* CONFIG_BPF_SYSCALL */ #endif /* _SELINUX_OBJSEC_H_ */ |
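/*
 * Illustrative sketch, not part of the original header: every accessor above
 * resolves a per-object security blob by adding the offset reserved for
 * SELinux in selinux_blob_sizes, exactly as current_sid() does for the
 * current credentials.  A hook needing the SID of an inode would combine the
 * same pieces; the helper name "inode_sid" is hypothetical.
 */
static inline u32 inode_sid(const struct inode *inode)
{
	const struct inode_security_struct *isec = selinux_inode(inode);

	/* selinux_inode() returns NULL when the inode blob is not set up. */
	return isec ? isec->sid : SECINITSID_UNLABELED;
}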
| 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHMEM_INTERNAL_H #define _LINUX_HIGHMEM_INTERNAL_H /* * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft. */ #ifdef CONFIG_KMAP_LOCAL void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot); void *__kmap_local_page_prot(const struct page *page, pgprot_t prot); void kunmap_local_indexed(const void *vaddr); void kmap_local_fork(struct task_struct *tsk); void __kmap_local_sched_out(void); void __kmap_local_sched_in(void); static inline void kmap_assert_nomap(void) { DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx); } #else static inline void kmap_local_fork(struct task_struct *tsk) { } static inline void kmap_assert_nomap(void) { } #endif #ifdef CONFIG_HIGHMEM #include <asm/highmem.h> #ifndef ARCH_HAS_KMAP_FLUSH_TLB static inline void kmap_flush_tlb(unsigned long addr) { } #endif #ifndef kmap_prot #define kmap_prot PAGE_KERNEL #endif void *kmap_high(struct page *page); void kunmap_high(const struct page *page); void __kmap_flush_unused(void); struct page *__kmap_to_page(void *addr); static inline void *kmap(struct page *page) { void *addr; might_sleep(); if (!PageHighMem(page)) addr = page_address(page); else addr = kmap_high(page); kmap_flush_tlb((unsigned long)addr); return addr; } static inline void kunmap(const struct page *page) { might_sleep(); if (!PageHighMem(page)) return; kunmap_high(page); } static inline struct page *kmap_to_page(void *addr) { return __kmap_to_page(addr); } static inline void kmap_flush_unused(void) { __kmap_flush_unused(); } static inline void *kmap_local_page(const struct page *page) { return __kmap_local_page_prot(page, kmap_prot); } static inline void *kmap_local_page_try_from_panic(const struct page *page) { if (!PageHighMem(page)) return page_address(page); /* If the page is in HighMem, it's not safe to kmap it.*/ return NULL; } static inline void *kmap_local_folio(const struct folio *folio, size_t offset) { const struct page *page = folio_page(folio, offset / PAGE_SIZE); return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE; } static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot) { return __kmap_local_page_prot(page, prot); } static inline void *kmap_local_pfn(unsigned long pfn) { return __kmap_local_pfn_prot(pfn, kmap_prot); } static inline void __kunmap_local(const void *vaddr) { kunmap_local_indexed(vaddr); } static inline void *kmap_atomic_prot(const struct page 
*page, pgprot_t prot) { if (IS_ENABLED(CONFIG_PREEMPT_RT)) migrate_disable(); else preempt_disable(); pagefault_disable(); return __kmap_local_page_prot(page, prot); } static inline void *kmap_atomic(const struct page *page) { return kmap_atomic_prot(page, kmap_prot); } static inline void *kmap_atomic_pfn(unsigned long pfn) { if (IS_ENABLED(CONFIG_PREEMPT_RT)) migrate_disable(); else preempt_disable(); pagefault_disable(); return __kmap_local_pfn_prot(pfn, kmap_prot); } static inline void __kunmap_atomic(const void *addr) { kunmap_local_indexed(addr); pagefault_enable(); if (IS_ENABLED(CONFIG_PREEMPT_RT)) migrate_enable(); else preempt_enable(); } unsigned long __nr_free_highpages(void); unsigned long __totalhigh_pages(void); static inline unsigned long nr_free_highpages(void) { return __nr_free_highpages(); } static inline unsigned long totalhigh_pages(void) { return __totalhigh_pages(); } static inline bool is_kmap_addr(const void *x) { unsigned long addr = (unsigned long)x; return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) || (addr >= __fix_to_virt(FIX_KMAP_END) && addr < __fix_to_virt(FIX_KMAP_BEGIN)); } #else /* CONFIG_HIGHMEM */ static inline struct page *kmap_to_page(void *addr) { return virt_to_page(addr); } static inline void *kmap(struct page *page) { might_sleep(); return page_address(page); } static inline void kunmap_high(const struct page *page) { } static inline void kmap_flush_unused(void) { } static inline void kunmap(const struct page *page) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(page_address(page)); #endif } static inline void *kmap_local_page(const struct page *page) { return page_address(page); } static inline void *kmap_local_page_try_from_panic(const struct page *page) { return page_address(page); } static inline void *kmap_local_folio(const struct folio *folio, size_t offset) { return folio_address(folio) + offset; } static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot) { return kmap_local_page(page); } static inline void *kmap_local_pfn(unsigned long pfn) { return kmap_local_page(pfn_to_page(pfn)); } static inline void __kunmap_local(const void *addr) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE)); #endif } static inline void *kmap_atomic(const struct page *page) { if (IS_ENABLED(CONFIG_PREEMPT_RT)) migrate_disable(); else preempt_disable(); pagefault_disable(); return page_address(page); } static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot) { return kmap_atomic(page); } static inline void *kmap_atomic_pfn(unsigned long pfn) { return kmap_atomic(pfn_to_page(pfn)); } static inline void __kunmap_atomic(const void *addr) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE)); #endif pagefault_enable(); if (IS_ENABLED(CONFIG_PREEMPT_RT)) migrate_enable(); else preempt_enable(); } static inline unsigned long nr_free_highpages(void) { return 0; } static inline unsigned long totalhigh_pages(void) { return 0; } static inline bool is_kmap_addr(const void *x) { return false; } #endif /* CONFIG_HIGHMEM */ /** * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated! * @__addr: Virtual address to be unmapped * * Unmaps an address previously mapped by kmap_atomic() and re-enables * pagefaults. Depending on PREEMP_RT configuration, re-enables also * migration and preemption. Users should not count on these side effects. * * Mappings should be unmapped in the reverse order that they were mapped. 
* See kmap_local_page() for details on nesting. * * @__addr can be any address within the mapped page, so there is no need * to subtract any offset that has been added. In contrast to kunmap(), * this function takes the address returned from kmap_atomic(), not the * page passed to it. The compiler will warn you if you pass the page. */ #define kunmap_atomic(__addr) \ do { \ BUILD_BUG_ON(__same_type((__addr), struct page *)); \ __kunmap_atomic(__addr); \ } while (0) /** * kunmap_local - Unmap a page mapped via kmap_local_page(). * @__addr: An address within the page mapped * * @__addr can be any address within the mapped page. Commonly it is the * address return from kmap_local_page(), but it can also include offsets. * * Unmapping should be done in the reverse order of the mapping. See * kmap_local_page() for details. */ #define kunmap_local(__addr) \ do { \ BUILD_BUG_ON(__same_type((__addr), struct page *)); \ __kunmap_local(__addr); \ } while (0) #endif |
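/*
 * Illustrative sketch, not part of the original header: typical use of the
 * kmap_local_page()/kunmap_local() pair documented above -- map a (possibly
 * highmem) page, copy a range out of it, and unmap again, passing the
 * address returned by the mapping call rather than the page.  Assumes
 * <linux/highmem.h> and <linux/string.h> are included; the helper name
 * "copy_range_from_page" is hypothetical.
 */
static inline void copy_range_from_page(struct page *page, size_t offset,
					void *dst, size_t len)
{
	void *vaddr = kmap_local_page(page);

	memcpy(dst, vaddr + offset, len);
	kunmap_local(vaddr);	/* unmap in reverse order of mapping */
}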
| 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007, 2008, 2009 Siemens AG * * Written by: * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> */ #ifndef __NET_CFG802154_H #define __NET_CFG802154_H #include <linux/ieee802154.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/bug.h> #include <net/nl802154.h> struct wpan_phy; struct wpan_phy_cca; struct cfg802154_scan_request; struct cfg802154_beacon_request; struct ieee802154_addr; #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL struct ieee802154_llsec_device_key; struct ieee802154_llsec_seclevel; struct ieee802154_llsec_params; struct ieee802154_llsec_device; struct ieee802154_llsec_table; struct ieee802154_llsec_key_id; struct ieee802154_llsec_key; #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ struct cfg802154_ops { struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, const char *name, unsigned char name_assign_type, int type); void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, struct net_device *dev); int (*suspend)(struct wpan_phy *wpan_phy); int (*resume)(struct wpan_phy *wpan_phy); int (*add_virtual_intf)(struct wpan_phy *wpan_phy, const char *name, unsigned char name_assign_type, 
enum nl802154_iftype type, __le64 extended_addr); int (*del_virtual_intf)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev); int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); int (*set_cca_mode)(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca); int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level); int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power); int (*set_pan_id)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id); int (*set_short_addr)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 short_addr); int (*set_backoff_exponent)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 min_be, u8 max_be); int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 max_csma_backoffs); int (*set_max_frame_retries)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, s8 max_frame_retries); int (*set_lbt_mode)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool mode); int (*set_ackreq_default)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool ackreq); int (*trigger_scan)(struct wpan_phy *wpan_phy, struct cfg802154_scan_request *request); int (*abort_scan)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev); int (*send_beacons)(struct wpan_phy *wpan_phy, struct cfg802154_beacon_request *request); int (*stop_beacons)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev); int (*associate)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_addr *coord); int (*disassociate)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_addr *target); #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL void (*get_llsec_table)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_llsec_table **table); void (*lock_llsec_table)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev); void (*unlock_llsec_table)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev); /* TODO remove locking/get table callbacks, this is part of the * nl802154 interface and should be accessible from ieee802154 layer. 
*/ int (*get_llsec_params)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_llsec_params *params); int (*set_llsec_params)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_params *params, int changed); int (*add_llsec_key)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_key_id *id, const struct ieee802154_llsec_key *key); int (*del_llsec_key)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_key_id *id); int (*add_seclevel)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_seclevel *sl); int (*del_seclevel)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_seclevel *sl); int (*add_device)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_device *dev); int (*del_device)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le64 extended_addr); int (*add_devkey)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le64 extended_addr, const struct ieee802154_llsec_device_key *key); int (*del_devkey)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le64 extended_addr, const struct ieee802154_llsec_device_key *key); #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ }; static inline bool wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st) { switch (st) { case NL802154_SUPPORTED_BOOL_TRUE: return b; case NL802154_SUPPORTED_BOOL_FALSE: return !b; case NL802154_SUPPORTED_BOOL_BOTH: return true; default: WARN_ON(1); } return false; } struct wpan_phy_supported { u32 channels[IEEE802154_MAX_PAGE + 1], cca_modes, cca_opts, iftypes; enum nl802154_supported_bool_states lbt; u8 min_minbe, max_minbe, min_maxbe, max_maxbe, min_csma_backoffs, max_csma_backoffs; s8 min_frame_retries, max_frame_retries; size_t tx_powers_size, cca_ed_levels_size; const s32 *tx_powers, *cca_ed_levels; }; struct wpan_phy_cca { enum nl802154_cca_modes mode; enum nl802154_cca_opts opt; }; static inline bool wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b) { if (a->mode != b->mode) return false; if (a->mode == NL802154_CCA_ENERGY_CARRIER) return a->opt == b->opt; return true; } /** * enum wpan_phy_flags - WPAN PHY state flags * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support * transmit power setting. * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed * level setting. * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode * setting. * @WPAN_PHY_FLAG_STATE_QUEUE_STOPPED: Indicates that the transmit queue was * temporarily stopped. * @WPAN_PHY_FLAG_DATAGRAMS_ONLY: Indicates that transceiver is only able to * send/receive datagrams. */ enum wpan_phy_flags { WPAN_PHY_FLAG_TXPOWER = BIT(1), WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2), WPAN_PHY_FLAG_CCA_MODE = BIT(3), WPAN_PHY_FLAG_STATE_QUEUE_STOPPED = BIT(4), WPAN_PHY_FLAG_DATAGRAMS_ONLY = BIT(5), }; struct wpan_phy { /* If multiple wpan_phys are registered and you're handed e.g. * a regular netdev with assigned ieee802154_ptr, you won't * know whether it points to a wpan_phy your driver has registered * or not. Assign this to something global to your driver to * help determine whether you own this wpan_phy or not. */ const void *privid; unsigned long flags; /* * This is a PIB according to 802.15.4-2011. 
* We do not provide timing-related variables, as they * aren't used outside of driver */ u8 current_channel; u8 current_page; struct wpan_phy_supported supported; /* current transmit_power in mBm */ s32 transmit_power; struct wpan_phy_cca cca; __le64 perm_extended_addr; /* current cca ed threshold in mBm */ s32 cca_ed_level; /* PHY depended MAC PIB values */ /* 802.15.4 acronym: Tdsym in nsec */ u32 symbol_duration; /* lifs and sifs periods timing */ u16 lifs_period; u16 sifs_period; struct device dev; /* the network namespace this phy lives in currently */ possible_net_t _net; /* Transmission monitoring and control */ spinlock_t queue_lock; atomic_t ongoing_txs; atomic_t hold_txs; wait_queue_head_t sync_txq; /* Current filtering level on reception. * Only allowed to be changed if phy is not operational. */ enum ieee802154_filtering_level filtering; char priv[] __aligned(NETDEV_ALIGN); }; static inline struct net *wpan_phy_net(struct wpan_phy *wpan_phy) { return read_pnet(&wpan_phy->_net); } static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net) { write_pnet(&wpan_phy->_net, net); } static inline bool ieee802154_chan_is_valid(struct wpan_phy *phy, u8 page, u8 channel) { if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL || !(phy->supported.channels[page] & BIT(channel))) return false; return true; } /** * struct ieee802154_addr - IEEE802.15.4 device address * @mode: Address mode from frame header. Can be one of: * - @IEEE802154_ADDR_NONE * - @IEEE802154_ADDR_SHORT * - @IEEE802154_ADDR_LONG * @pan_id: The PAN ID this address belongs to * @short_addr: address if @mode is @IEEE802154_ADDR_SHORT * @extended_addr: address if @mode is @IEEE802154_ADDR_LONG */ struct ieee802154_addr { u8 mode; __le16 pan_id; union { __le16 short_addr; __le64 extended_addr; }; }; /** * struct ieee802154_coord_desc - Coordinator descriptor * @addr: PAN ID and coordinator address * @page: page this coordinator is using * @channel: channel this coordinator is using * @superframe_spec: SuperFrame specification as received * @link_quality: link quality indicator at which the beacon was received * @gts_permit: the coordinator accepts GTS requests */ struct ieee802154_coord_desc { struct ieee802154_addr addr; u8 page; u8 channel; u16 superframe_spec; u8 link_quality; bool gts_permit; }; /** * struct ieee802154_pan_device - PAN device information * @pan_id: the PAN ID of this device * @mode: the preferred mode to reach the device * @short_addr: the short address of this device * @extended_addr: the extended address of this device * @node: the list node */ struct ieee802154_pan_device { __le16 pan_id; u8 mode; __le16 short_addr; __le64 extended_addr; struct list_head node; }; /** * struct cfg802154_scan_request - Scan request * * @type: type of scan to be performed * @page: page on which to perform the scan * @channels: channels in te %page to be scanned * @duration: time spent on each channel, calculated with: * aBaseSuperframeDuration * (2 ^ duration + 1) * @wpan_dev: the wpan device on which to perform the scan * @wpan_phy: the wpan phy on which to perform the scan */ struct cfg802154_scan_request { enum nl802154_scan_types type; u8 page; u32 channels; u8 duration; struct wpan_dev *wpan_dev; struct wpan_phy *wpan_phy; }; /** * struct cfg802154_beacon_request - Beacon request descriptor * * @interval: interval n between sendings, in multiple order of the super frame * duration: aBaseSuperframeDuration * (2^n) unless the interval * order is greater or equal to 15, in this case 
beacons won't be * passively sent out at a fixed rate but instead inform the device * that it should answer beacon requests as part of active scan * procedures * @wpan_dev: the concerned wpan device * @wpan_phy: the wpan phy this was for */ struct cfg802154_beacon_request { u8 interval; struct wpan_dev *wpan_dev; struct wpan_phy *wpan_phy; }; /** * struct cfg802154_mac_pkt - MAC packet descriptor (beacon/command) * @node: MAC packets to process list member * @skb: the received sk_buff * @sdata: the interface on which @skb was received * @page: page configuration when @skb was received * @channel: channel configuration when @skb was received */ struct cfg802154_mac_pkt { struct list_head node; struct sk_buff *skb; struct ieee802154_sub_if_data *sdata; u8 page; u8 channel; }; struct ieee802154_llsec_key_id { u8 mode; u8 id; union { struct ieee802154_addr device_addr; __le32 short_source; __le64 extended_source; }; }; #define IEEE802154_LLSEC_KEY_SIZE 16 struct ieee802154_llsec_key { u8 frame_types; u32 cmd_frame_ids; /* TODO replace with NL802154_KEY_SIZE */ u8 key[IEEE802154_LLSEC_KEY_SIZE]; }; struct ieee802154_llsec_key_entry { struct list_head list; struct rcu_head rcu; struct ieee802154_llsec_key_id id; struct ieee802154_llsec_key *key; }; struct ieee802154_llsec_params { bool enabled; __be32 frame_counter; u8 out_level; struct ieee802154_llsec_key_id out_key; __le64 default_key_source; __le16 pan_id; __le64 hwaddr; __le64 coord_hwaddr; __le16 coord_shortaddr; }; struct ieee802154_llsec_table { struct list_head keys; struct list_head devices; struct list_head security_levels; }; struct ieee802154_llsec_seclevel { struct list_head list; u8 frame_type; u8 cmd_frame_id; bool device_override; u32 sec_levels; }; struct ieee802154_llsec_device { struct list_head list; __le16 pan_id; __le16 short_addr; __le64 hwaddr; u32 frame_counter; bool seclevel_exempt; u8 key_mode; struct list_head keys; }; struct ieee802154_llsec_device_key { struct list_head list; struct ieee802154_llsec_key_id key_id; u32 frame_counter; }; struct wpan_dev_header_ops { /* TODO create callback currently assumes ieee802154_mac_cb inside * skb->cb. This should be changed to give these information as * parameter. 
*/ int (*create)(struct sk_buff *skb, struct net_device *dev, const struct ieee802154_addr *daddr, const struct ieee802154_addr *saddr, unsigned int len); }; struct wpan_dev { struct wpan_phy *wpan_phy; int iftype; /* the remainder of this struct should be private to cfg802154 */ struct list_head list; struct net_device *netdev; const struct wpan_dev_header_ops *header_ops; /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */ struct net_device *lowpan_dev; u32 identifier; /* MAC PIB */ __le16 pan_id; __le16 short_addr; __le64 extended_addr; /* MAC BSN field */ atomic_t bsn; /* MAC DSN field */ atomic_t dsn; u8 min_be; u8 max_be; u8 csma_retries; s8 frame_retries; bool lbt; /* fallback for acknowledgment bit setting */ bool ackreq; /* Associations */ struct mutex association_lock; struct ieee802154_pan_device *parent; struct list_head children; unsigned int max_associations; unsigned int nchildren; }; #define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) static inline int wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, const struct ieee802154_addr *daddr, const struct ieee802154_addr *saddr, unsigned int len) { struct wpan_dev *wpan_dev = dev->ieee802154_ptr; return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len); } #endif struct wpan_phy * wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size); static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) { phy->dev.parent = dev; } int wpan_phy_register(struct wpan_phy *phy); void wpan_phy_unregister(struct wpan_phy *phy); void wpan_phy_free(struct wpan_phy *phy); /* Same semantics as for class_for_each_device */ int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); static inline void *wpan_phy_priv(struct wpan_phy *phy) { BUG_ON(!phy); return &phy->priv; } struct wpan_phy *wpan_phy_find(const char *str); static inline void wpan_phy_put(struct wpan_phy *phy) { put_device(&phy->dev); } static inline const char *wpan_phy_name(struct wpan_phy *phy) { return dev_name(&phy->dev); } void ieee802154_configure_durations(struct wpan_phy *phy, unsigned int page, unsigned int channel); /** * cfg802154_device_is_associated - Checks whether we are associated to any device * @wpan_dev: the wpan device * @return: true if we are associated */ bool cfg802154_device_is_associated(struct wpan_dev *wpan_dev); /** * cfg802154_device_is_parent - Checks if a device is our coordinator * @wpan_dev: the wpan device * @target: the expected parent * @return: true if @target is our coordinator */ bool cfg802154_device_is_parent(struct wpan_dev *wpan_dev, struct ieee802154_addr *target); /** * cfg802154_device_is_child - Checks whether a device is associated to us * @wpan_dev: the wpan device * @target: the expected child * @return: the PAN device */ struct ieee802154_pan_device * cfg802154_device_is_child(struct wpan_dev *wpan_dev, struct ieee802154_addr *target); /** * cfg802154_set_max_associations - Limit the number of future associations * @wpan_dev: the wpan device * @max: the maximum number of devices we accept to associate * @return: the old maximum value */ unsigned int cfg802154_set_max_associations(struct wpan_dev *wpan_dev, unsigned int max); /** * cfg802154_get_free_short_addr - Get a free address among the known devices * @wpan_dev: the wpan device * @return: a random short address expectedly unused on our PAN */ __le16 cfg802154_get_free_short_addr(struct wpan_dev *wpan_dev); #endif /* 
__NET_CFG802154_H */ |
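The header above only declares the rdev-facing entry points, so here is a small, hypothetical sketch of how a caller holding a struct cfg802154_ops might validate and apply a channel change using the helpers declared in this file (ieee802154_chan_is_valid(), ieee802154_configure_durations() and the set_channel callback). The example_switch_channel() name and the way the ops pointer is obtained are illustrative only; in the real tree this logic lives in the nl802154/rdev layer, which is not shown here.

#include <net/cfg802154.h>

/*
 * Hypothetical helper, for illustration only: validate a page/channel pair
 * against the PHY's advertised capabilities before handing it to the
 * driver's set_channel() callback.
 */
static int example_switch_channel(const struct cfg802154_ops *ops,
				  struct wpan_phy *wpan_phy,
				  u8 page, u8 channel)
{
	int ret;

	/* Reject pages/channels the hardware never advertised. */
	if (!ieee802154_chan_is_valid(wpan_phy, page, channel))
		return -EINVAL;

	if (!ops->set_channel)
		return -EOPNOTSUPP;

	ret = ops->set_channel(wpan_phy, page, channel);
	if (ret)
		return ret;

	/* Mirror the new setting into the PIB and refresh symbol timings. */
	wpan_phy->current_page = page;
	wpan_phy->current_channel = channel;
	ieee802154_configure_durations(wpan_phy, page, channel);

	return 0;
}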
| 10 10 10 10 7 7 7 7 10 10 7 7 7 7 7 7 3 7 7 7 7 7 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007-2012 Siemens AG * * Written by: * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <net/netlink.h> #include <net/nl802154.h> #include <net/mac802154.h> #include <net/ieee802154_netdev.h> #include <net/route.h> #include <net/cfg802154.h> #include "ieee802154_i.h" #include "cfg.h" static void ieee802154_tasklet_handler(struct tasklet_struct *t) { struct ieee802154_local *local = from_tasklet(local, t, tasklet); struct sk_buff *skb; while ((skb = skb_dequeue(&local->skb_queue))) { switch (skb->pkt_type) { case IEEE802154_RX_MSG: /* Clear skb->pkt_type in order to not confuse kernel * netstack. */ skb->pkt_type = 0; ieee802154_rx(local, skb); break; default: WARN(1, "mac802154: Packet is of unknown type %d\n", skb->pkt_type); kfree_skb(skb); break; } } } struct ieee802154_hw * ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) { struct wpan_phy *phy; struct ieee802154_local *local; size_t priv_size; if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed || !ops->start || !ops->stop || !ops->set_channel)) return NULL; /* Ensure 32-byte alignment of our private data and hw private data. * We use the wpan_phy priv data for both our ieee802154_local and for * the driver's private data * * in memory it'll be like this: * * +-------------------------+ * | struct wpan_phy | * +-------------------------+ * | struct ieee802154_local | * +-------------------------+ * | driver's private data | * +-------------------------+ * * Due to ieee802154 layer isn't aware of driver and MAC structures, * so lets align them here. 
*/ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; phy = wpan_phy_new(&mac802154_config_ops, priv_size); if (!phy) { pr_err("failure to allocate master IEEE802.15.4 device\n"); return NULL; } phy->privid = mac802154_wpan_phy_privid; local = wpan_phy_priv(phy); local->phy = phy; local->hw.phy = local->phy; local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); local->ops = ops; INIT_LIST_HEAD(&local->interfaces); INIT_LIST_HEAD(&local->rx_beacon_list); INIT_LIST_HEAD(&local->rx_mac_cmd_list); mutex_init(&local->iflist_mtx); tasklet_setup(&local->tasklet, ieee802154_tasklet_handler); skb_queue_head_init(&local->skb_queue); INIT_WORK(&local->sync_tx_work, ieee802154_xmit_sync_worker); INIT_DELAYED_WORK(&local->scan_work, mac802154_scan_worker); INIT_WORK(&local->rx_beacon_work, mac802154_rx_beacon_worker); INIT_DELAYED_WORK(&local->beacon_work, mac802154_beacon_worker); INIT_WORK(&local->rx_mac_cmd_work, mac802154_rx_mac_cmd_worker); init_completion(&local->assoc_done); /* init supported flags with 802.15.4 default ranges */ phy->supported.max_minbe = 8; phy->supported.min_maxbe = 3; phy->supported.max_maxbe = 8; phy->supported.min_frame_retries = 0; phy->supported.max_frame_retries = 7; phy->supported.max_csma_backoffs = 5; phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE; /* always supported */ phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE) | BIT(NL802154_IFTYPE_COORD); return &local->hw; } EXPORT_SYMBOL(ieee802154_alloc_hw); void ieee802154_configure_durations(struct wpan_phy *phy, unsigned int page, unsigned int channel) { u32 duration = 0; switch (page) { case 0: if (BIT(channel) & 0x1) /* 868 MHz BPSK 802.15.4-2003: 20 ksym/s */ duration = 50 * NSEC_PER_USEC; else if (BIT(channel) & 0x7FE) /* 915 MHz BPSK 802.15.4-2003: 40 ksym/s */ duration = 25 * NSEC_PER_USEC; else if (BIT(channel) & 0x7FFF800) /* 2400 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */ duration = 16 * NSEC_PER_USEC; break; case 2: if (BIT(channel) & 0x1) /* 868 MHz O-QPSK 802.15.4-2006: 25 ksym/s */ duration = 40 * NSEC_PER_USEC; else if (BIT(channel) & 0x7FE) /* 915 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */ duration = 16 * NSEC_PER_USEC; break; case 3: if (BIT(channel) & 0x3FFF) /* 2.4 GHz CSS 802.15.4a-2007: 1/6 Msym/s */ duration = 6 * NSEC_PER_USEC; break; default: break; } if (!duration) { pr_debug("Unknown PHY symbol duration\n"); return; } phy->symbol_duration = duration; phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC; phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC; } EXPORT_SYMBOL(ieee802154_configure_durations); void ieee802154_free_hw(struct ieee802154_hw *hw) { struct ieee802154_local *local = hw_to_local(hw); BUG_ON(!list_empty(&local->interfaces)); mutex_destroy(&local->iflist_mtx); wpan_phy_free(local->phy); } EXPORT_SYMBOL(ieee802154_free_hw); static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy) { /* TODO warn on empty symbol_duration * Should be done when all drivers sets this value. 
*/ wpan_phy->lifs_period = (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / NSEC_PER_USEC; wpan_phy->sifs_period = (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / NSEC_PER_USEC; } int ieee802154_register_hw(struct ieee802154_hw *hw) { struct ieee802154_local *local = hw_to_local(hw); char mac_wq_name[IFNAMSIZ + 10] = {}; struct net_device *dev; int rc = -ENOSYS; local->workqueue = create_singlethread_workqueue(wpan_phy_name(local->phy)); if (!local->workqueue) { rc = -ENOMEM; goto out; } snprintf(mac_wq_name, IFNAMSIZ + 10, "%s-mac-cmds", wpan_phy_name(local->phy)); local->mac_wq = create_singlethread_workqueue(mac_wq_name); if (!local->mac_wq) { rc = -ENOMEM; goto out_wq; } hrtimer_setup(&local->ifs_timer, ieee802154_xmit_ifs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); wpan_phy_set_dev(local->phy, local->hw.parent); ieee802154_setup_wpan_phy_pib(local->phy); ieee802154_configure_durations(local->phy, local->phy->current_page, local->phy->current_channel); if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) { local->phy->supported.min_csma_backoffs = 4; local->phy->supported.max_csma_backoffs = 4; local->phy->supported.min_maxbe = 5; local->phy->supported.max_maxbe = 5; local->phy->supported.min_minbe = 3; local->phy->supported.max_minbe = 3; } if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) { local->phy->supported.min_frame_retries = 3; local->phy->supported.max_frame_retries = 3; } if (hw->flags & IEEE802154_HW_PROMISCUOUS) local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR); rc = wpan_phy_register(local->phy); if (rc < 0) goto out_mac_wq; rtnl_lock(); dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM, NL802154_IFTYPE_NODE, cpu_to_le64(0x0000000000000000ULL)); if (IS_ERR(dev)) { rtnl_unlock(); rc = PTR_ERR(dev); goto out_phy; } rtnl_unlock(); return 0; out_phy: wpan_phy_unregister(local->phy); out_mac_wq: destroy_workqueue(local->mac_wq); out_wq: destroy_workqueue(local->workqueue); out: return rc; } EXPORT_SYMBOL(ieee802154_register_hw); void ieee802154_unregister_hw(struct ieee802154_hw *hw) { struct ieee802154_local *local = hw_to_local(hw); tasklet_kill(&local->tasklet); flush_workqueue(local->workqueue); rtnl_lock(); ieee802154_remove_interfaces(local); rtnl_unlock(); destroy_workqueue(local->mac_wq); destroy_workqueue(local->workqueue); wpan_phy_unregister(local->phy); } EXPORT_SYMBOL(ieee802154_unregister_hw); static int __init ieee802154_init(void) { return ieee802154_iface_init(); } static void __exit ieee802154_exit(void) { ieee802154_iface_exit(); rcu_barrier(); } subsys_initcall(ieee802154_init); module_exit(ieee802154_exit); MODULE_DESCRIPTION("IEEE 802.15.4 subsystem"); MODULE_LICENSE("GPL v2"); |
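ieee802154_alloc_hw() and ieee802154_register_hw() above are the driver-facing half of this file. Below is a minimal, hypothetical probe/remove skeleton showing the expected call order. The ieee802154_ops callback signatures are assumed from <net/mac802154.h>, which is not part of this file, and every callback body is a stub for illustration only.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

struct example_priv {
	struct ieee802154_hw *hw;
	/* bus handles, IRQ state, etc. would live here in a real driver */
};

static int example_start(struct ieee802154_hw *hw)
{
	return 0;	/* power up the transceiver */
}

static void example_stop(struct ieee802154_hw *hw)
{
	/* power the transceiver back down */
}

static int example_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* hand the frame to the radio; a real driver reports completion
	 * later from its TX-done path
	 */
	return 0;
}

static int example_ed(struct ieee802154_hw *hw, u8 *level)
{
	*level = 0;	/* fake energy-detect result */
	return 0;
}

static int example_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	return 0;	/* program the radio */
}

static const struct ieee802154_ops example_ops = {
	.owner		= THIS_MODULE,
	.start		= example_start,
	.stop		= example_stop,
	.xmit_async	= example_xmit_async,
	.ed		= example_ed,
	.set_channel	= example_set_channel,
};

static int example_probe(struct device *parent)
{
	struct ieee802154_hw *hw;
	struct example_priv *priv;
	int ret;

	/* ieee802154_alloc_hw() insists on xmit, ed, start, stop, set_channel */
	hw = ieee802154_alloc_hw(sizeof(*priv), &example_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;
	priv->hw = hw;
	hw->parent = parent;	/* consumed by wpan_phy_set_dev() at register time */

	/* advertise channels 11-26 on page 0 (2.4 GHz O-QPSK) */
	hw->phy->supported.channels[0] = 0x7fff800;
	hw->phy->current_page = 0;
	hw->phy->current_channel = 11;

	ret = ieee802154_register_hw(hw);
	if (ret) {
		ieee802154_free_hw(hw);
		return ret;
	}

	return 0;
}

static void example_remove(struct example_priv *priv)
{
	ieee802154_unregister_hw(priv->hw);
	ieee802154_free_hw(priv->hw);
}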
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/pagevec.h
 *
 * In many places it is efficient to batch an operation up against multiple
 * folios. A folio_batch is a container which is used for that.
 */
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H

#include <linux/types.h>

/* 31 pointers + header align the folio_batch structure to a power of two */
#define PAGEVEC_SIZE	31

struct folio;

/**
 * struct folio_batch - A collection of folios.
 *
 * The folio_batch is used to amortise the cost of retrieving and
 * operating on a set of folios. The order of folios in the batch may be
 * significant (eg delete_from_page_cache_batch()). Some users of the
 * folio_batch store "exceptional" entries in it which can be removed
 * by calling folio_batch_remove_exceptionals().
 */
struct folio_batch {
	unsigned char nr;
	unsigned char i;
	bool percpu_pvec_drained;
	struct folio *folios[PAGEVEC_SIZE];
};

/**
 * folio_batch_init() - Initialise a batch of folios
 * @fbatch: The folio batch.
 *
 * A freshly initialised folio_batch contains zero folios.
 */
static inline void folio_batch_init(struct folio_batch *fbatch)
{
	fbatch->nr = 0;
	fbatch->i = 0;
	fbatch->percpu_pvec_drained = false;
}

static inline void folio_batch_reinit(struct folio_batch *fbatch)
{
	fbatch->nr = 0;
	fbatch->i = 0;
}

static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
{
	return fbatch->nr;
}

static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
	return PAGEVEC_SIZE - fbatch->nr;
}

/**
 * folio_batch_add() - Add a folio to a batch.
 * @fbatch: The folio batch.
 * @folio: The folio to add.
 *
 * The folio is added to the end of the batch.
 * The batch must have previously been initialised using folio_batch_init().
 *
 * Return: The number of slots still available.
 */
static inline unsigned folio_batch_add(struct folio_batch *fbatch,
		struct folio *folio)
{
	fbatch->folios[fbatch->nr++] = folio;
	return folio_batch_space(fbatch);
}

/**
 * folio_batch_next - Return the next folio to process.
 * @fbatch: The folio batch being processed.
 *
 * Use this function to implement a queue of folios.
 *
 * Return: The next folio in the queue, or NULL if the queue is empty.
 */
static inline struct folio *folio_batch_next(struct folio_batch *fbatch)
{
	if (fbatch->i == fbatch->nr)
		return NULL;
	return fbatch->folios[fbatch->i++];
}

void __folio_batch_release(struct folio_batch *pvec);

static inline void folio_batch_release(struct folio_batch *fbatch)
{
	if (folio_batch_count(fbatch))
		__folio_batch_release(fbatch);
}

void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
#endif /* _LINUX_PAGEVEC_H */
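A short, illustrative sketch of the fill-process-release pattern these helpers are designed for. get_next_folio() and do_something_with() are hypothetical stand-ins for the caller's own lookup and per-folio work; get_next_folio() is assumed to return folios with a reference held, which folio_batch_release() then drops.

#include <linux/pagevec.h>

/* hypothetical stand-ins for the caller's own lookup / per-folio work */
struct folio *get_next_folio(void);
void do_something_with(struct folio *folio);

static void example_process_folios(void)
{
	struct folio_batch fbatch;
	struct folio *folio;

	folio_batch_init(&fbatch);

	while ((folio = get_next_folio()) != NULL) {
		/* folio_batch_add() returns the number of free slots left */
		if (folio_batch_add(&fbatch, folio) == 0) {
			/* batch is full: walk it ... */
			while ((folio = folio_batch_next(&fbatch)))
				do_something_with(folio);
			/* ... drop the references and start a fresh batch */
			folio_batch_release(&fbatch);
			folio_batch_reinit(&fbatch);
		}
	}

	/* flush whatever remains in the final, partially filled batch */
	while ((folio = folio_batch_next(&fbatch)))
		do_something_with(folio);
	folio_batch_release(&fbatch);
}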
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/lib/kasprintf.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/stdarg.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>

/* Simplified asprintf. */
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int first, second;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	first = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc_track_caller(first+1, gfp);
	if (!p)
		return NULL;

	second = vsnprintf(p, first+1, fmt, ap);
	WARN(first != second, "different return values (%u and %u) from vsnprintf(\"%s\", ...)",
	     first, second, fmt);

	return p;
}
EXPORT_SYMBOL(kvasprintf);

/*
 * If fmt contains no % (or is exactly %s), use kstrdup_const. If fmt
 * (or the sole vararg) points to rodata, we will then save a memory
 * allocation and string copy. In any case, the return value should be
 * freed using kfree_const().
 */
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap)
{
	if (!strchr(fmt, '%'))
		return kstrdup_const(fmt, gfp);
	if (!strcmp(fmt, "%s"))
		return kstrdup_const(va_arg(ap, const char*), gfp);
	return kvasprintf(gfp, fmt, ap);
}
EXPORT_SYMBOL(kvasprintf_const);

char *kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL(kasprintf);
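For completeness, a tiny illustrative caller. The pairing rule follows from the comment above: kasprintf()/kvasprintf() results are freed with kfree(), while kvasprintf_const() may hand back rodata via kstrdup_const() and must therefore be freed with kfree_const(). example_make_name() is a hypothetical helper.

#include <linux/slab.h>

/* Hypothetical helper: build "base.N" on the heap; caller frees with kfree(). */
static char *example_make_name(const char *base, unsigned int index)
{
	return kasprintf(GFP_KERNEL, "%s.%u", base, index);
}

/*
 * Hypothetical usage:
 *
 *	char *name = example_make_name("uart", 3);	// "uart.3", or NULL on OOM
 *	...
 *	kfree(name);
 *
 * Had kvasprintf_const() been used instead, the result would have to be
 * released with kfree_const(), since it may point into rodata.
 */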
| 2 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 | // SPDX-License-Identifier: GPL-2.0+ /* * LEGO USB Tower driver * * Copyright (C) 2003 David Glance <davidgsf@sourceforge.net> * 
2001-2004 Juergen Stuber <starblue@users.sourceforge.net> * * derived from USB Skeleton driver - 0.5 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * * History: * * 2001-10-13 - 0.1 js * - first version * 2001-11-03 - 0.2 js * - simplified buffering, one-shot URBs for writing * 2001-11-10 - 0.3 js * - removed IOCTL (setting power/mode is more complicated, postponed) * 2001-11-28 - 0.4 js * - added vendor commands for mode of operation and power level in open * 2001-12-04 - 0.5 js * - set IR mode by default (by oversight 0.4 set VLL mode) * 2002-01-11 - 0.5? pcchan * - make read buffer reusable and work around bytes_to_write issue between * uhci and legusbtower * 2002-09-23 - 0.52 david (david@csse.uwa.edu.au) * - imported into lejos project * - changed wake_up to wake_up_interruptible * - changed to use lego0 rather than tower0 * - changed dbg() to use __func__ rather than deprecated __func__ * 2003-01-12 - 0.53 david (david@csse.uwa.edu.au) * - changed read and write to write everything or * timeout (from a patch by Chris Riesen and Brett Thaeler driver) * - added ioctl functionality to set timeouts * 2003-07-18 - 0.54 davidgsf (david@csse.uwa.edu.au) * - initial import into LegoUSB project * - merge of existing LegoUSB.c driver * 2003-07-18 - 0.56 davidgsf (david@csse.uwa.edu.au) * - port to 2.6 style driver * 2004-02-29 - 0.6 Juergen Stuber <starblue@users.sourceforge.net> * - fix locking * - unlink read URBs which are no longer needed * - allow increased buffer size, eliminates need for timeout on write * - have read URB running continuously * - added poll * - forbid seeking * - added nonblocking I/O * - changed back __func__ to __func__ * - read and log tower firmware version * - reset tower on probe, avoids failure of first write * 2004-03-09 - 0.7 Juergen Stuber <starblue@users.sourceforge.net> * - timeout read now only after inactivity, shorten default accordingly * 2004-03-11 - 0.8 Juergen Stuber <starblue@users.sourceforge.net> * - log major, minor instead of possibly confusing device filename * - whitespace cleanup * 2004-03-12 - 0.9 Juergen Stuber <starblue@users.sourceforge.net> * - normalize whitespace in debug messages * - take care about endianness in control message responses * 2004-03-13 - 0.91 Juergen Stuber <starblue@users.sourceforge.net> * - make default intervals longer to accommodate current EHCI driver * 2004-03-19 - 0.92 Juergen Stuber <starblue@users.sourceforge.net> * - replaced atomic_t by memory barriers * 2004-04-21 - 0.93 Juergen Stuber <starblue@users.sourceforge.net> * - wait for completion of write urb in release (needed for remotecontrol) * - corrected poll for write direction (missing negation) * 2004-04-22 - 0.94 Juergen Stuber <starblue@users.sourceforge.net> * - make device locking interruptible * 2004-04-30 - 0.95 Juergen Stuber <starblue@users.sourceforge.net> * - check for valid udev on resubmitting and unlinking urbs * 2004-08-03 - 0.96 Juergen Stuber <starblue@users.sourceforge.net> * - move reset into open to clean out spurious data */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/poll.h> #define DRIVER_AUTHOR "Juergen Stuber <starblue@sourceforge.net>" #define DRIVER_DESC "LEGO USB Tower Driver" /* The defaults are chosen to work with the latest versions of leJOS and NQC. 
*/ /* Some legacy software likes to receive packets in one piece. * In this case read_buffer_size should exceed the maximal packet length * (417 for datalog uploads), and packet_timeout should be set. */ static int read_buffer_size = 480; module_param(read_buffer_size, int, 0); MODULE_PARM_DESC(read_buffer_size, "Read buffer size"); /* Some legacy software likes to send packets in one piece. * In this case write_buffer_size should exceed the maximal packet length * (417 for firmware and program downloads). * A problem with long writes is that the following read may time out * if the software is not prepared to wait long enough. */ static int write_buffer_size = 480; module_param(write_buffer_size, int, 0); MODULE_PARM_DESC(write_buffer_size, "Write buffer size"); /* Some legacy software expects reads to contain whole LASM packets. * To achieve this, characters which arrive before a packet timeout * occurs will be returned in a single read operation. * A problem with long reads is that the software may time out * if it is not prepared to wait long enough. * The packet timeout should be greater than the time between the * reception of subsequent characters, which should arrive about * every 5ms for the standard 2400 baud. * Set it to 0 to disable. */ static int packet_timeout = 50; module_param(packet_timeout, int, 0); MODULE_PARM_DESC(packet_timeout, "Packet timeout in ms"); /* Some legacy software expects blocking reads to time out. * Timeout occurs after the specified time of read and write inactivity. * Set it to 0 to disable. */ static int read_timeout = 200; module_param(read_timeout, int, 0); MODULE_PARM_DESC(read_timeout, "Read timeout in ms"); /* As of kernel version 2.6.4 ehci-hcd uses an * "only one interrupt transfer per frame" shortcut * to simplify the scheduling of periodic transfers. * This conflicts with our standard 1ms intervals for in and out URBs. * We use default intervals of 2ms for in and 8ms for out transfers, * which is fast enough for 2400 baud and allows a small additional load. * Increase the interval to allow more devices that do interrupt transfers, * or set to 0 to use the standard interval from the endpoint descriptors. 
*/ static int interrupt_in_interval = 2; module_param(interrupt_in_interval, int, 0); MODULE_PARM_DESC(interrupt_in_interval, "Interrupt in interval in ms"); static int interrupt_out_interval = 8; module_param(interrupt_out_interval, int, 0); MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms"); /* Define these values to match your device */ #define LEGO_USB_TOWER_VENDOR_ID 0x0694 #define LEGO_USB_TOWER_PRODUCT_ID 0x0001 /* Vendor requests */ #define LEGO_USB_TOWER_REQUEST_RESET 0x04 #define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD struct tower_reset_reply { __le16 size; __u8 err_code; __u8 spare; }; struct tower_get_version_reply { __le16 size; __u8 err_code; __u8 spare; __u8 major; __u8 minor; __le16 build_no; }; /* table of devices that work with this driver */ static const struct usb_device_id tower_table[] = { { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, tower_table); #define LEGO_USB_TOWER_MINOR_BASE 160 /* Structure to hold all of our device specific stuff */ struct lego_usb_tower { struct mutex lock; /* locks this structure */ struct usb_device *udev; /* save off the usb device pointer */ unsigned char minor; /* the starting minor number for this device */ int open_count; /* number of times this port has been opened */ unsigned long disconnected:1; char *read_buffer; size_t read_buffer_length; /* this much came in */ size_t read_packet_length; /* this much will be returned on read */ spinlock_t read_buffer_lock; int packet_timeout_jiffies; unsigned long read_last_arrival; wait_queue_head_t read_wait; wait_queue_head_t write_wait; char *interrupt_in_buffer; struct usb_endpoint_descriptor *interrupt_in_endpoint; struct urb *interrupt_in_urb; int interrupt_in_interval; int interrupt_in_done; char *interrupt_out_buffer; struct usb_endpoint_descriptor *interrupt_out_endpoint; struct urb *interrupt_out_urb; int interrupt_out_interval; int interrupt_out_busy; }; /* local function prototypes */ static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos); static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos); static inline void tower_delete(struct lego_usb_tower *dev); static int tower_open(struct inode *inode, struct file *file); static int tower_release(struct inode *inode, struct file *file); static __poll_t tower_poll(struct file *file, poll_table *wait); static loff_t tower_llseek(struct file *file, loff_t off, int whence); static void tower_check_for_read_packet(struct lego_usb_tower *dev); static void tower_interrupt_in_callback(struct urb *urb); static void tower_interrupt_out_callback(struct urb *urb); static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id); static void tower_disconnect(struct usb_interface *interface); /* file operations needed when we register this driver */ static const struct file_operations tower_fops = { .owner = THIS_MODULE, .read = tower_read, .write = tower_write, .open = tower_open, .release = tower_release, .poll = tower_poll, .llseek = tower_llseek, }; static char *legousbtower_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with the driver core */ static struct usb_class_driver tower_class = { .name = "legousbtower%d", .devnode = legousbtower_devnode, .fops = &tower_fops, 
.minor_base = LEGO_USB_TOWER_MINOR_BASE, }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver tower_driver = { .name = "legousbtower", .probe = tower_probe, .disconnect = tower_disconnect, .id_table = tower_table, }; /* * lego_usb_tower_debug_data */ static inline void lego_usb_tower_debug_data(struct device *dev, const char *function, int size, const unsigned char *data) { dev_dbg(dev, "%s - length = %d, data = %*ph\n", function, size, size, data); } /* * tower_delete */ static inline void tower_delete(struct lego_usb_tower *dev) { /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree(dev->read_buffer); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); usb_put_dev(dev->udev); kfree(dev); } /* * tower_open */ static int tower_open(struct inode *inode, struct file *file) { struct lego_usb_tower *dev = NULL; int subminor; int retval = 0; struct usb_interface *interface; struct tower_reset_reply reset_reply; int result; nonseekable_open(inode, file); subminor = iminor(inode); interface = usb_find_interface(&tower_driver, subminor); if (!interface) { pr_err("error, can't find device for minor %d\n", subminor); retval = -ENODEV; goto exit; } dev = usb_get_intfdata(interface); if (!dev) { retval = -ENODEV; goto exit; } /* lock this device */ if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } /* allow opening only once */ if (dev->open_count) { retval = -EBUSY; goto unlock_exit; } /* reset the tower */ result = usb_control_msg_recv(dev->udev, 0, LEGO_USB_TOWER_REQUEST_RESET, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &reset_reply, sizeof(reset_reply), 1000, GFP_KERNEL); if (result < 0) { dev_err(&dev->udev->dev, "LEGO USB Tower reset control request failed\n"); retval = result; goto unlock_exit; } /* initialize in direction */ dev->read_buffer_length = 0; dev->read_packet_length = 0; usb_fill_int_urb(dev->interrupt_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), tower_interrupt_in_callback, dev, dev->interrupt_in_interval); dev->interrupt_in_done = 0; mb(); retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL); if (retval) { dev_err(&dev->udev->dev, "Couldn't submit interrupt_in_urb %d\n", retval); goto unlock_exit; } /* save device in the file's private structure */ file->private_data = dev; dev->open_count = 1; unlock_exit: mutex_unlock(&dev->lock); exit: return retval; } /* * tower_release */ static int tower_release(struct inode *inode, struct file *file) { struct lego_usb_tower *dev; int retval = 0; dev = file->private_data; if (dev == NULL) { retval = -ENODEV; goto exit; } mutex_lock(&dev->lock); if (dev->disconnected) { /* the device was unplugged before the file was released */ /* unlock here as tower_delete frees dev */ mutex_unlock(&dev->lock); tower_delete(dev); goto exit; } /* wait until write transfer is finished */ if (dev->interrupt_out_busy) { wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy, 2 * HZ); } /* shutdown transfers */ usb_kill_urb(dev->interrupt_in_urb); usb_kill_urb(dev->interrupt_out_urb); dev->open_count = 0; mutex_unlock(&dev->lock); exit: return retval; } /* * tower_check_for_read_packet * * To get correct semantics for signals and non-blocking I/O * with packetizing we pretend not to see any data in the read buffer * until it has been there unchanged for 
at least * dev->packet_timeout_jiffies, or until the buffer is full. */ static void tower_check_for_read_packet(struct lego_usb_tower *dev) { spin_lock_irq(&dev->read_buffer_lock); if (!packet_timeout || time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies) || dev->read_buffer_length == read_buffer_size) { dev->read_packet_length = dev->read_buffer_length; } dev->interrupt_in_done = 0; spin_unlock_irq(&dev->read_buffer_lock); } /* * tower_poll */ static __poll_t tower_poll(struct file *file, poll_table *wait) { struct lego_usb_tower *dev; __poll_t mask = 0; dev = file->private_data; if (dev->disconnected) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); poll_wait(file, &dev->write_wait, wait); tower_check_for_read_packet(dev); if (dev->read_packet_length > 0) mask |= EPOLLIN | EPOLLRDNORM; if (!dev->interrupt_out_busy) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } /* * tower_llseek */ static loff_t tower_llseek(struct file *file, loff_t off, int whence) { return -ESPIPE; /* unseekable */ } /* * tower_read */ static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct lego_usb_tower *dev; size_t bytes_to_read; int i; int retval = 0; unsigned long timeout = 0; dev = file->private_data; /* lock this object */ if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } /* verify that the device wasn't unplugged */ if (dev->disconnected) { retval = -ENODEV; goto unlock_exit; } /* verify that we actually have some data to read */ if (count == 0) { dev_dbg(&dev->udev->dev, "read request of 0 bytes\n"); goto unlock_exit; } if (read_timeout) timeout = jiffies + msecs_to_jiffies(read_timeout); /* wait for data */ tower_check_for_read_packet(dev); while (dev->read_packet_length == 0) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto unlock_exit; } retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies); if (retval < 0) goto unlock_exit; /* reset read timeout during read or write activity */ if (read_timeout && (dev->read_buffer_length || dev->interrupt_out_busy)) { timeout = jiffies + msecs_to_jiffies(read_timeout); } /* check for read timeout */ if (read_timeout && time_after(jiffies, timeout)) { retval = -ETIMEDOUT; goto unlock_exit; } tower_check_for_read_packet(dev); } /* copy the data from read_buffer into userspace */ bytes_to_read = min(count, dev->read_packet_length); if (copy_to_user(buffer, dev->read_buffer, bytes_to_read)) { retval = -EFAULT; goto unlock_exit; } spin_lock_irq(&dev->read_buffer_lock); dev->read_buffer_length -= bytes_to_read; dev->read_packet_length -= bytes_to_read; for (i = 0; i < dev->read_buffer_length; i++) dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read]; spin_unlock_irq(&dev->read_buffer_lock); retval = bytes_to_read; unlock_exit: /* unlock the device */ mutex_unlock(&dev->lock); exit: return retval; } /* * tower_write */ static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct lego_usb_tower *dev; size_t bytes_to_write; int retval = 0; dev = file->private_data; /* lock this object */ if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } /* verify that the device wasn't unplugged */ if (dev->disconnected) { retval = -ENODEV; goto unlock_exit; } /* verify that we actually have some data to write */ if (count == 0) { dev_dbg(&dev->udev->dev, "write request of 0 bytes\n"); goto unlock_exit; } /* wait until previous 
transfer is finished */ while (dev->interrupt_out_busy) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto unlock_exit; } retval = wait_event_interruptible(dev->write_wait, !dev->interrupt_out_busy); if (retval) goto unlock_exit; } /* write the data into interrupt_out_buffer from userspace */ bytes_to_write = min_t(int, count, write_buffer_size); dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n", __func__, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) { retval = -EFAULT; goto unlock_exit; } /* send off the urb */ usb_fill_int_urb(dev->interrupt_out_urb, dev->udev, usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress), dev->interrupt_out_buffer, bytes_to_write, tower_interrupt_out_callback, dev, dev->interrupt_out_interval); dev->interrupt_out_busy = 1; wmb(); retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL); if (retval) { dev->interrupt_out_busy = 0; dev_err(&dev->udev->dev, "Couldn't submit interrupt_out_urb %d\n", retval); goto unlock_exit; } retval = bytes_to_write; unlock_exit: /* unlock the device */ mutex_unlock(&dev->lock); exit: return retval; } /* * tower_interrupt_in_callback */ static void tower_interrupt_in_callback(struct urb *urb) { struct lego_usb_tower *dev = urb->context; int status = urb->status; int retval; unsigned long flags; lego_usb_tower_debug_data(&dev->udev->dev, __func__, urb->actual_length, urb->transfer_buffer); if (status) { if (status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN) { goto exit; } else { dev_dbg(&dev->udev->dev, "%s: nonzero status received: %d\n", __func__, status); goto resubmit; /* maybe we can recover */ } } if (urb->actual_length > 0) { spin_lock_irqsave(&dev->read_buffer_lock, flags); if (dev->read_buffer_length + urb->actual_length < read_buffer_size) { memcpy(dev->read_buffer + dev->read_buffer_length, dev->interrupt_in_buffer, urb->actual_length); dev->read_buffer_length += urb->actual_length; dev->read_last_arrival = jiffies; dev_dbg(&dev->udev->dev, "%s: received %d bytes\n", __func__, urb->actual_length); } else { pr_warn("read_buffer overflow, %d bytes dropped\n", urb->actual_length); } spin_unlock_irqrestore(&dev->read_buffer_lock, flags); } resubmit: retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->udev->dev, "%s: usb_submit_urb failed (%d)\n", __func__, retval); } exit: dev->interrupt_in_done = 1; wake_up_interruptible(&dev->read_wait); } /* * tower_interrupt_out_callback */ static void tower_interrupt_out_callback(struct urb *urb) { struct lego_usb_tower *dev = urb->context; int status = urb->status; lego_usb_tower_debug_data(&dev->udev->dev, __func__, urb->actual_length, urb->transfer_buffer); /* sync/async unlink faults aren't errors */ if (status && !(status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN)) { dev_dbg(&dev->udev->dev, "%s: nonzero write bulk status received: %d\n", __func__, status); } dev->interrupt_out_busy = 0; wake_up_interruptible(&dev->write_wait); } /* * tower_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. 
*/ static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct device *idev = &interface->dev; struct usb_device *udev = interface_to_usbdev(interface); struct lego_usb_tower *dev; struct tower_get_version_reply get_version_reply; int retval = -ENOMEM; int result; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto exit; mutex_init(&dev->lock); dev->udev = usb_get_dev(udev); spin_lock_init(&dev->read_buffer_lock); dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout); dev->read_last_arrival = jiffies; init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); result = usb_find_common_endpoints_reverse(interface->cur_altsetting, NULL, NULL, &dev->interrupt_in_endpoint, &dev->interrupt_out_endpoint); if (result) { dev_err(idev, "interrupt endpoints not found\n"); retval = result; goto error; } dev->read_buffer = kmalloc(read_buffer_size, GFP_KERNEL); if (!dev->read_buffer) goto error; dev->interrupt_in_buffer = kmalloc(usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL); if (!dev->interrupt_in_buffer) goto error; dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_in_urb) goto error; dev->interrupt_out_buffer = kmalloc(write_buffer_size, GFP_KERNEL); if (!dev->interrupt_out_buffer) goto error; dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_out_urb) goto error; dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; /* get the firmware version and log it */ result = usb_control_msg_recv(udev, 0, LEGO_USB_TOWER_REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &get_version_reply, sizeof(get_version_reply), 1000, GFP_KERNEL); if (result) { dev_err(idev, "get version request failed: %d\n", result); retval = result; goto error; } dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d build %d\n", get_version_reply.major, get_version_reply.minor, le16_to_cpu(get_version_reply.build_no)); /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &tower_class); if (retval) { /* something prevented us from registering this driver */ dev_err(idev, "Not able to get a minor for this device.\n"); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major " "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), USB_MAJOR, dev->minor); exit: return retval; error: tower_delete(dev); return retval; } /* * tower_disconnect * * Called by the usb core when the device is removed from the system. 
*/ static void tower_disconnect(struct usb_interface *interface) { struct lego_usb_tower *dev; int minor; dev = usb_get_intfdata(interface); minor = dev->minor; /* give back our minor and prevent further open() */ usb_deregister_dev(interface, &tower_class); /* stop I/O */ usb_poison_urb(dev->interrupt_in_urb); usb_poison_urb(dev->interrupt_out_urb); mutex_lock(&dev->lock); /* if the device is not opened, then we clean up right now */ if (!dev->open_count) { mutex_unlock(&dev->lock); tower_delete(dev); } else { dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); mutex_unlock(&dev->lock); } dev_info(&interface->dev, "LEGO USB Tower #%d now disconnected\n", (minor - LEGO_USB_TOWER_MINOR_BASE)); } module_usb_driver(tower_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
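To make the read-side semantics concrete, here is an illustrative userspace snippet (not part of the driver) that polls the tower and reads one packet. POLLIN is reported only once tower_check_for_read_packet() has promoted buffered bytes into a complete packet, i.e. after packet_timeout of inactivity or a full buffer. The /dev/usb/legousbtower0 path is the node typically created from the legousbtower%d devnode above, but it depends on the local udev setup.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[480];			/* matches the default read_buffer_size */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/usb/legousbtower0", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	/* POLLIN means a whole packet has accumulated (see packet_timeout) */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n > 0)
			printf("received %zd byte packet\n", n);
	}

	close(pfd.fd);
	return 0;
}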
| 2 2 2 2 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 | // SPDX-License-Identifier: GPL-2.0-or-later /* * usbusx2y.c - ALSA USB US-428 Driver * 2005-04-14 Karsten Wiese Version 0.8.7.2: Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom. Tested ok with kernel 2.6.12-rc2. 2004-12-14 Karsten Wiese Version 0.8.7.1: snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open. 2004-12-02 Karsten Wiese Version 0.8.7: Use macro usb_maxpacket() for portability. 2004-10-26 Karsten Wiese Version 0.8.6: wake_up() process waiting in usx2y_urbs_start() on error. 2004-10-21 Karsten Wiese Version 0.8.5: nrpacks is runtime or compiletime configurable now with tested values from 1 to 4. 2004-10-03 Karsten Wiese Version 0.8.2: Avoid any possible racing while in prepare callback. 2004-09-30 Karsten Wiese Version 0.8.0: Simplified things and made ohci work again. 2004-09-20 Karsten Wiese Version 0.7.3: Use usb_kill_urb() instead of deprecated (kernel 2.6.9) usb_unlink_urb(). 2004-07-13 Karsten Wiese Version 0.7.1: Don't sleep in START/STOP callbacks anymore. us428 channels C/D not handled just for this version, sorry. 2004-06-21 Karsten Wiese Version 0.6.4: Temporarely suspend midi input to sanely call usb_set_interface() when setting format. 2004-06-12 Karsten Wiese Version 0.6.3: Made it thus the following rule is enforced: "All pcm substreams of one usx2y have to operate at the same rate & format." 2004-04-06 Karsten Wiese Version 0.6.0: Runs on 2.6.5 kernel without any "--with-debug=" things. us224 reported running. 2004-01-14 Karsten Wiese Version 0.5.1: Runs with 2.6.1 kernel. 
2003-12-30 Karsten Wiese Version 0.4.1: Fix 24Bit 4Channel capturing for the us428. 2003-11-27 Karsten Wiese, Martin Langer Version 0.4: us122 support. us224 could be tested by uncommenting the sections containing USB_ID_US224 2003-11-03 Karsten Wiese Version 0.3: 24Bit support. "arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works. 2003-08-22 Karsten Wiese Version 0.0.8: Removed EZUSB Firmware. First Stage Firmwaredownload is now done by tascam-firmware downloader. See: http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz 2003-06-18 Karsten Wiese Version 0.0.5: changed to compile with kernel 2.4.21 and alsa 0.9.4 2002-10-16 Karsten Wiese Version 0.0.4: compiles again with alsa-current. USB_ISO_ASAP not used anymore (most of the time), instead urb->start_frame is calculated here now, some calls inside usb-driver don't need to happen anymore. To get the best out of this: Disable APM-support in the kernel as APM-BIOS calls (once each second) hard disable interrupt for many precious milliseconds. This helped me much on my slowish PII 400 & PIII 500. ACPI yet untested but might cause the same bad behaviour. Use a kernel with lowlatency and preemptiv patches applied. To autoload snd-usb-midi append a line post-install snd-usb-us428 modprobe snd-usb-midi to /etc/modules.conf. known problems: sliders, knobs, lights not yet handled except MASTER Volume slider. "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does. KDE3: "Enable full duplex operation" deadlocks. 2002-08-31 Karsten Wiese Version 0.0.3: audio also simplex; simplifying: iso urbs only 1 packet, melted structs. ASYNC_UNLINK not used anymore: no more crashes so far..... for alsa 0.9 rc3. 2002-08-09 Karsten Wiese Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex now) with i.e. bristol. The firmware has been sniffed from win2k us-428 driver 3.09. * Copyright (c) 2002 - 2004 Karsten Wiese */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include "usx2y.h" #include "usbusx2y.h" #include "usX2Yhwdep.h" MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS"."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS"."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS"."); static int snd_usx2y_card_used[SNDRV_CARDS]; static void snd_usx2y_card_private_free(struct snd_card *card); static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s); #ifdef USX2Y_NRPACKS_VARIABLE int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ module_param(nrpacks, int, 0444); MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); #endif /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... 
*/ static void i_usx2y_out04_int(struct urb *urb) { #ifdef CONFIG_SND_DEBUG if (urb->status) { int i; struct usx2ydev *usx2y = urb->context; for (i = 0; i < 10 && usx2y->as04.urb[i] != urb; i++) ; dev_dbg(&urb->dev->dev, "%s urb %i status=%i\n", __func__, i, urb->status); } #endif } static void i_usx2y_in04_int(struct urb *urb) { int err = 0; struct usx2ydev *usx2y = urb->context; struct us428ctls_sharedmem *us428ctls = usx2y->us428ctls_sharedmem; struct us428_p4out *p4out; int i, j, n, diff, send; usx2y->in04_int_calls++; if (urb->status) { dev_dbg(&urb->dev->dev, "Interrupt Pipe 4 came back with status=%i\n", urb->status); return; } if (us428ctls) { diff = -1; if (us428ctls->ctl_snapshot_last == -2) { diff = 0; memcpy(usx2y->in04_last, usx2y->in04_buf, sizeof(usx2y->in04_last)); us428ctls->ctl_snapshot_last = -1; } else { for (i = 0; i < 21; i++) { if (usx2y->in04_last[i] != ((char *)usx2y->in04_buf)[i]) { if (diff < 0) diff = i; usx2y->in04_last[i] = ((char *)usx2y->in04_buf)[i]; } } } if (diff >= 0) { n = us428ctls->ctl_snapshot_last + 1; if (n >= N_US428_CTL_BUFS || n < 0) n = 0; memcpy(us428ctls->ctl_snapshot + n, usx2y->in04_buf, sizeof(us428ctls->ctl_snapshot[0])); us428ctls->ctl_snapshot_differs_at[n] = diff; us428ctls->ctl_snapshot_last = n; wake_up(&usx2y->us428ctls_wait_queue_head); } } if (usx2y->us04) { if (!usx2y->us04->submitted) { do { err = usb_submit_urb(usx2y->us04->urb[usx2y->us04->submitted++], GFP_ATOMIC); } while (!err && usx2y->us04->submitted < usx2y->us04->len); } } else { if (us428ctls && us428ctls->p4out_last >= 0 && us428ctls->p4out_last < N_US428_P4OUT_BUFS) { if (us428ctls->p4out_last != us428ctls->p4out_sent) { send = us428ctls->p4out_sent + 1; if (send >= N_US428_P4OUT_BUFS) send = 0; for (j = 0; j < URBS_ASYNC_SEQ && !err; ++j) { if (!usx2y->as04.urb[j]->status) { p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost. usb_fill_bulk_urb(usx2y->as04.urb[j], usx2y->dev, usb_sndbulkpipe(usx2y->dev, 0x04), &p4out->val.vol, p4out->type == ELT_LIGHT ? 
sizeof(struct us428_lights) : 5, i_usx2y_out04_int, usx2y); err = usb_submit_urb(usx2y->as04.urb[j], GFP_ATOMIC); us428ctls->p4out_sent = send; break; } } } } } if (err) dev_err(&urb->dev->dev, "in04_int() usb_submit_urb err=%i\n", err); urb->dev = usx2y->dev; usb_submit_urb(urb, GFP_ATOMIC); } /* * Prepare some urbs */ int usx2y_async_seq04_init(struct usx2ydev *usx2y) { int err = 0, i; if (WARN_ON(usx2y->as04.buffer)) return -EBUSY; usx2y->as04.buffer = kmalloc_array(URBS_ASYNC_SEQ, URB_DATA_LEN_ASYNC_SEQ, GFP_KERNEL); if (!usx2y->as04.buffer) { err = -ENOMEM; } else { for (i = 0; i < URBS_ASYNC_SEQ; ++i) { usx2y->as04.urb[i] = usb_alloc_urb(0, GFP_KERNEL); if (!usx2y->as04.urb[i]) { err = -ENOMEM; break; } usb_fill_bulk_urb(usx2y->as04.urb[i], usx2y->dev, usb_sndbulkpipe(usx2y->dev, 0x04), usx2y->as04.buffer + URB_DATA_LEN_ASYNC_SEQ * i, 0, i_usx2y_out04_int, usx2y); err = usb_urb_ep_type_check(usx2y->as04.urb[i]); if (err < 0) break; } } if (err) usx2y_unlinkseq(&usx2y->as04); return err; } int usx2y_in04_init(struct usx2ydev *usx2y) { int err; if (WARN_ON(usx2y->in04_urb)) return -EBUSY; usx2y->in04_urb = usb_alloc_urb(0, GFP_KERNEL); if (!usx2y->in04_urb) { err = -ENOMEM; goto error; } usx2y->in04_buf = kmalloc(21, GFP_KERNEL); if (!usx2y->in04_buf) { err = -ENOMEM; goto error; } init_waitqueue_head(&usx2y->in04_wait_queue); usb_fill_int_urb(usx2y->in04_urb, usx2y->dev, usb_rcvintpipe(usx2y->dev, 0x4), usx2y->in04_buf, 21, i_usx2y_in04_int, usx2y, 10); if (usb_urb_ep_type_check(usx2y->in04_urb)) { err = -EINVAL; goto error; } return usb_submit_urb(usx2y->in04_urb, GFP_KERNEL); error: kfree(usx2y->in04_buf); usb_free_urb(usx2y->in04_urb); usx2y->in04_buf = NULL; usx2y->in04_urb = NULL; return err; } static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s) { int i; for (i = 0; i < URBS_ASYNC_SEQ; ++i) { if (!s->urb[i]) continue; usb_kill_urb(s->urb[i]); usb_free_urb(s->urb[i]); s->urb[i] = NULL; } kfree(s->buffer); s->buffer = NULL; } static const struct usb_device_id snd_usx2y_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US428 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US122 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US224 }, { /* terminator */ } }; MODULE_DEVICE_TABLE(usb, snd_usx2y_usb_id_table); static int usx2y_create_card(struct usb_device *device, struct usb_interface *intf, struct snd_card **cardp) { int dev; struct snd_card *card; int err; for (dev = 0; dev < SNDRV_CARDS; ++dev) if (enable[dev] && !snd_usx2y_card_used[dev]) break; if (dev >= SNDRV_CARDS) return -ENODEV; err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE, sizeof(struct usx2ydev), &card); if (err < 0) return err; snd_usx2y_card_used[usx2y(card)->card_index = dev] = 1; card->private_free = snd_usx2y_card_private_free; usx2y(card)->dev = device; init_waitqueue_head(&usx2y(card)->prepare_wait_queue); init_waitqueue_head(&usx2y(card)->us428ctls_wait_queue_head); mutex_init(&usx2y(card)->pcm_mutex); INIT_LIST_HEAD(&usx2y(card)->midi_list); strscpy(card->driver, "USB "NAME_ALLCAPS""); sprintf(card->shortname, "TASCAM "NAME_ALLCAPS""); sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)", card->shortname, le16_to_cpu(device->descriptor.idVendor), le16_to_cpu(device->descriptor.idProduct), 0,//us428(card)->usbmidi.ifnum, usx2y(card)->dev->bus->busnum, usx2y(card)->dev->devnum); *cardp = card; return 0; } static void snd_usx2y_card_private_free(struct 
snd_card *card) { struct usx2ydev *usx2y = usx2y(card); kfree(usx2y->in04_buf); usb_free_urb(usx2y->in04_urb); if (usx2y->us428ctls_sharedmem) free_pages_exact(usx2y->us428ctls_sharedmem, US428_SHAREDMEM_PAGES); if (usx2y->card_index >= 0 && usx2y->card_index < SNDRV_CARDS) snd_usx2y_card_used[usx2y->card_index] = 0; } static void snd_usx2y_disconnect(struct usb_interface *intf) { struct snd_card *card; struct usx2ydev *usx2y; struct list_head *p; card = usb_get_intfdata(intf); if (!card) return; usx2y = usx2y(card); usx2y->chip_status = USX2Y_STAT_CHIP_HUP; usx2y_unlinkseq(&usx2y->as04); usb_kill_urb(usx2y->in04_urb); snd_card_disconnect(card); /* release the midi resources */ list_for_each(p, &usx2y->midi_list) { snd_usbmidi_disconnect(p); } if (usx2y->us428ctls_sharedmem) wake_up(&usx2y->us428ctls_wait_queue_head); snd_card_free_when_closed(card); } static int snd_usx2y_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *device = interface_to_usbdev(intf); struct snd_card *card; int err; #ifdef USX2Y_NRPACKS_VARIABLE if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX) return -EINVAL; #endif if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428)) return -EINVAL; err = usx2y_create_card(device, intf, &card); if (err < 0) return err; err = usx2y_hwdep_new(card, device); if (err < 0) goto error; err = snd_card_register(card); if (err < 0) goto error; dev_set_drvdata(&intf->dev, card); return 0; error: snd_card_free(card); return err; } static struct usb_driver snd_usx2y_usb_driver = { .name = "snd-usb-usx2y", .probe = snd_usx2y_probe, .disconnect = snd_usx2y_disconnect, .id_table = snd_usx2y_usb_id_table, }; module_usb_driver(snd_usx2y_usb_driver); |
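/*
 * Editor's note (not part of the original file): the slot bookkeeping in
 * usx2y_create_card() above is easy to misread. The dense statement
 *   snd_usx2y_card_used[usx2y(card)->card_index = dev] = 1;
 * stores the chosen index in the card's private data and marks the same
 * slot as used in one go, i.e. it behaves like
 *   usx2y(card)->card_index = dev;
 *   snd_usx2y_card_used[dev] = 1;
 * so a later probe skips that slot. The helper below is a minimal sketch
 * of the preceding free-slot scan under the same assumptions (the
 * file-scope enable[] and snd_usx2y_card_used[] arrays); it is an
 * illustration, not a function of the original driver.
 */
static int example_usx2y_pick_free_slot(void)
{
	int dev;

	for (dev = 0; dev < SNDRV_CARDS; ++dev)
		if (enable[dev] && !snd_usx2y_card_used[dev])
			return dev;	/* first enabled, unused card slot */
	return -ENODEV;			/* all slots disabled or taken */
}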
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for Topre REALFORCE Keyboards * * Copyright (c) 2022 Harry Stern <harry@harrystern.net> * * Based on the hid-macally driver */ #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" MODULE_AUTHOR("Harry Stern <harry@harrystern.net>"); MODULE_DESCRIPTION("REALFORCE R2 Keyboard driver"); MODULE_LICENSE("GPL"); /* * Fix the REALFORCE R2's non-boot interface's report descriptor to match the * events it's actually sending. It claims to send array events but is instead * sending variable events. */ static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 119 && rdesc[69] == 0x29 && rdesc[70] == 0xe7 && rdesc[71] == 0x81 && rdesc[72] == 0x00) { hid_info(hdev, "fixing up Topre REALFORCE keyboard report descriptor\n"); rdesc[72] = 0x02; } else if (*rsize >= 106 && rdesc[28] == 0x29 && rdesc[29] == 0xe7 && rdesc[30] == 0x81 && rdesc[31] == 0x00) { hid_info(hdev, "fixing up Topre REALFORCE keyboard report descriptor\n"); rdesc[31] = 0x02; } return rdesc; } static const struct hid_device_id topre_id_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE, USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE, USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE, USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87) }, { } }; MODULE_DEVICE_TABLE(hid, topre_id_table); static struct hid_driver topre_driver = { .name = "topre", .id_table = topre_id_table, .report_fixup = topre_report_fixup, }; module_hid_driver(topre_driver); |
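/*
 * Editor's note (not part of the original file): the bytes that
 * topre_report_fixup() patches are the data byte of a HID "Input" short
 * item. In the stock descriptor the keyboard declares
 *   0x29, 0xe7,   Usage Maximum (0xE7)
 *   0x81, 0x00,   Input (Data, Array, Absolute)
 * but it actually sends variable bitmaps, so the flag byte is changed to
 * 0x02, giving Input (Data, Variable, Absolute); bit 1 of the Input
 * item's data selects Array (0) versus Variable (1) per the HID 1.11
 * specification. The array below only illustrates the fixed-up item and
 * is not data used by the driver.
 */
static const __u8 example_topre_fixed_input_item[] = {
	0x29, 0xe7,	/* Usage Maximum (231) */
	0x81, 0x02,	/* Input (Data, Variable, Absolute) */
};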
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 | // SPDX-License-Identifier: GPL-2.0-only /* * RTL8XXXU mac80211 USB driver - 8723b specific subdriver * * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This driver was written as a replacement for the vendor provided * rtl8723au driver. As the Realtek 8xxx chips are very similar in * their programming interface, I have started adding support for * additional 8xxx chips like the 8192cu, 8188cus, etc. */ #include "regs.h" #include "rtl8xxxu.h" static const struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = { {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0}, {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05}, {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01}, {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00}, {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f}, {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00}, {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f}, {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a}, {0x525, 0x4f}, {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04}, {0xffff, 0xff}, }; static const struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = { {0x800, 0x80040000}, {0x804, 0x00000003}, {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, {0x810, 0x10001331}, {0x814, 0x020c3d10}, {0x818, 0x02200385}, {0x81c, 0x00000000}, {0x820, 0x01000100}, {0x824, 0x00190204}, {0x828, 0x00000000}, {0x82c, 0x00000000}, {0x830, 0x00000000}, {0x834, 0x00000000}, {0x838, 0x00000000}, {0x83c, 0x00000000}, {0x840, 0x00010000}, {0x844, 0x00000000}, {0x848, 0x00000000}, {0x84c, 0x00000000}, {0x850, 0x00000000}, {0x854, 0x00000000}, {0x858, 0x569a11a9}, {0x85c, 0x01000014}, {0x860, 0x66f60110}, {0x864, 0x061f0649}, {0x868, 0x00000000}, {0x86c, 0x27272700}, {0x870, 0x07000760}, {0x874, 
0x25004000}, {0x878, 0x00000808}, {0x87c, 0x00000000}, {0x880, 0xb0000c1c}, {0x884, 0x00000001}, {0x888, 0x00000000}, {0x88c, 0xccc000c0}, {0x890, 0x00000800}, {0x894, 0xfffffffe}, {0x898, 0x40302010}, {0x89c, 0x00706050}, {0x900, 0x00000000}, {0x904, 0x00000023}, {0x908, 0x00000000}, {0x90c, 0x81121111}, {0x910, 0x00000002}, {0x914, 0x00000201}, {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c}, {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f}, {0xa10, 0x9500bb78}, {0xa14, 0x1114d028}, {0xa18, 0x00881117}, {0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317}, {0xa28, 0x00000204}, {0xa2c, 0x00d30000}, {0xa70, 0x101fbf00}, {0xa74, 0x00000007}, {0xa78, 0x00000900}, {0xa7c, 0x225b0606}, {0xa80, 0x21806490}, {0xb2c, 0x00000000}, {0xc00, 0x48071d40}, {0xc04, 0x03a05611}, {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000}, {0xc14, 0x40000100}, {0xc18, 0x08800000}, {0xc1c, 0x40000100}, {0xc20, 0x00000000}, {0xc24, 0x00000000}, {0xc28, 0x00000000}, {0xc2c, 0x00000000}, {0xc30, 0x69e9ac44}, {0xc34, 0x469652af}, {0xc38, 0x49795994}, {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7}, {0xc48, 0xec020107}, {0xc4c, 0x007f037f}, {0xc50, 0x69553420}, {0xc54, 0x43bc0094}, {0xc58, 0x00013149}, {0xc5c, 0x00250492}, {0xc60, 0x00000000}, {0xc64, 0x7112848b}, {0xc68, 0x47c00bff}, {0xc6c, 0x00000036}, {0xc70, 0x2c7f000d}, {0xc74, 0x020610db}, {0xc78, 0x0000001f}, {0xc7c, 0x00b91612}, {0xc80, 0x390000e4}, {0xc84, 0x20f60000}, {0xc88, 0x40000100}, {0xc8c, 0x20200000}, {0xc90, 0x00020e1a}, {0xc94, 0x00000000}, {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f}, {0xca0, 0x00000000}, {0xca4, 0x000300a0}, {0xca8, 0x00000000}, {0xcac, 0x00000000}, {0xcb0, 0x00000000}, {0xcb4, 0x00000000}, {0xcb8, 0x00000000}, {0xcbc, 0x28000000}, {0xcc0, 0x00000000}, {0xcc4, 0x00000000}, {0xcc8, 0x00000000}, {0xccc, 0x00000000}, {0xcd0, 0x00000000}, {0xcd4, 0x00000000}, {0xcd8, 0x64b22427}, {0xcdc, 0x00766932}, {0xce0, 0x00222222}, {0xce4, 0x00000000}, {0xce8, 0x37644302}, {0xcec, 0x2f97d40c}, {0xd00, 0x00000740}, {0xd04, 0x40020401}, {0xd08, 0x0000907f}, {0xd0c, 0x20010201}, {0xd10, 0xa0633333}, {0xd14, 0x3333bc53}, {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975}, {0xd30, 0x00000000}, {0xd34, 0x80608000}, {0xd38, 0x00000000}, {0xd3c, 0x00127353}, {0xd40, 0x00000000}, {0xd44, 0x00000000}, {0xd48, 0x00000000}, {0xd4c, 0x00000000}, {0xd50, 0x6437140a}, {0xd54, 0x00000000}, {0xd58, 0x00000282}, {0xd5c, 0x30032064}, {0xd60, 0x4653de68}, {0xd64, 0x04518a3c}, {0xd68, 0x00002101}, {0xd6c, 0x2a201c16}, {0xd70, 0x1812362e}, {0xd74, 0x322c2220}, {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d}, {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d}, {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d}, {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d}, {0xe28, 0x00000000}, {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f}, {0xe38, 0x02140102}, {0xe3c, 0x681604c2}, {0xe40, 0x01007c00}, {0xe44, 0x01004800}, {0xe48, 0xfb000000}, {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f}, {0xe58, 0x02140102}, {0xe5c, 0x28160d05}, {0xe60, 0x00000008}, {0xe68, 0x001b2556}, {0xe6c, 0x00c00096}, {0xe70, 0x00c00096}, {0xe74, 0x01000056}, {0xe78, 0x01000014}, {0xe7c, 0x01000056}, {0xe80, 0x01000014}, {0xe84, 0x00c00096}, {0xe88, 0x01000056}, {0xe8c, 0x00c00096}, {0xed0, 0x00c00096}, {0xed4, 0x00c00096}, {0xed8, 0x00c00096}, {0xedc, 0x000000d6}, {0xee0, 0x000000d6}, {0xeec, 0x01c00016}, {0xf14, 0x00000003}, {0xf4c, 0x00000000}, {0xf00, 0x00000300}, {0x820, 0x01000100}, {0x800, 0x83040000}, {0xffff, 0xffffffff}, }; static const struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = { {0xc78, 0xfd000001}, 
{0xc78, 0xfc010001}, {0xc78, 0xfb020001}, {0xc78, 0xfa030001}, {0xc78, 0xf9040001}, {0xc78, 0xf8050001}, {0xc78, 0xf7060001}, {0xc78, 0xf6070001}, {0xc78, 0xf5080001}, {0xc78, 0xf4090001}, {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001}, {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001}, {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001}, {0xc78, 0xed100001}, {0xc78, 0xec110001}, {0xc78, 0xeb120001}, {0xc78, 0xea130001}, {0xc78, 0xe9140001}, {0xc78, 0xe8150001}, {0xc78, 0xe7160001}, {0xc78, 0xe6170001}, {0xc78, 0xe5180001}, {0xc78, 0xe4190001}, {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001}, {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001}, {0xc78, 0x671e0001}, {0xc78, 0x661f0001}, {0xc78, 0x65200001}, {0xc78, 0x64210001}, {0xc78, 0x63220001}, {0xc78, 0x4a230001}, {0xc78, 0x49240001}, {0xc78, 0x48250001}, {0xc78, 0x47260001}, {0xc78, 0x46270001}, {0xc78, 0x45280001}, {0xc78, 0x44290001}, {0xc78, 0x432a0001}, {0xc78, 0x422b0001}, {0xc78, 0x292c0001}, {0xc78, 0x282d0001}, {0xc78, 0x272e0001}, {0xc78, 0x262f0001}, {0xc78, 0x0a300001}, {0xc78, 0x09310001}, {0xc78, 0x08320001}, {0xc78, 0x07330001}, {0xc78, 0x06340001}, {0xc78, 0x05350001}, {0xc78, 0x04360001}, {0xc78, 0x03370001}, {0xc78, 0x02380001}, {0xc78, 0x01390001}, {0xc78, 0x013a0001}, {0xc78, 0x013b0001}, {0xc78, 0x013c0001}, {0xc78, 0x013d0001}, {0xc78, 0x013e0001}, {0xc78, 0x013f0001}, {0xc78, 0xfc400001}, {0xc78, 0xfb410001}, {0xc78, 0xfa420001}, {0xc78, 0xf9430001}, {0xc78, 0xf8440001}, {0xc78, 0xf7450001}, {0xc78, 0xf6460001}, {0xc78, 0xf5470001}, {0xc78, 0xf4480001}, {0xc78, 0xf3490001}, {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001}, {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001}, {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001}, {0xc78, 0xec500001}, {0xc78, 0xeb510001}, {0xc78, 0xea520001}, {0xc78, 0xe9530001}, {0xc78, 0xe8540001}, {0xc78, 0xe7550001}, {0xc78, 0xe6560001}, {0xc78, 0xe5570001}, {0xc78, 0xe4580001}, {0xc78, 0xe3590001}, {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001}, {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001}, {0xc78, 0x675e0001}, {0xc78, 0x665f0001}, {0xc78, 0x65600001}, {0xc78, 0x64610001}, {0xc78, 0x63620001}, {0xc78, 0x62630001}, {0xc78, 0x61640001}, {0xc78, 0x48650001}, {0xc78, 0x47660001}, {0xc78, 0x46670001}, {0xc78, 0x45680001}, {0xc78, 0x44690001}, {0xc78, 0x436a0001}, {0xc78, 0x426b0001}, {0xc78, 0x286c0001}, {0xc78, 0x276d0001}, {0xc78, 0x266e0001}, {0xc78, 0x256f0001}, {0xc78, 0x24700001}, {0xc78, 0x09710001}, {0xc78, 0x08720001}, {0xc78, 0x07730001}, {0xc78, 0x06740001}, {0xc78, 0x05750001}, {0xc78, 0x04760001}, {0xc78, 0x03770001}, {0xc78, 0x02780001}, {0xc78, 0x01790001}, {0xc78, 0x017a0001}, {0xc78, 0x017b0001}, {0xc78, 0x017c0001}, {0xc78, 0x017d0001}, {0xc78, 0x017e0001}, {0xc78, 0x017f0001}, {0xc50, 0x69553422}, {0xc50, 0x69553420}, {0x824, 0x00390204}, {0xffff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = { {0x00, 0x00010000}, {0xb0, 0x000dffe0}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xb1, 0x00000018}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xb2, 0x00084c00}, {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa}, {0xb7, 0x00000010}, {0xb8, 0x0000907f}, {0x5c, 0x00000002}, {0x7c, 0x00000002}, {0x7e, 0x00000005}, {0x8b, 0x0006fc00}, {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2}, {0x1e, 0x00000000}, {0xdf, 0x00000780}, {0x50, 0x00067435}, /* * The 8723bu vendor driver indicates that bit 8 should be set in * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However * they never actually check the package type - and just default * to not setting it. 
*/ {0x51, 0x0006b04e}, {0x52, 0x000007d2}, {0x53, 0x00000000}, {0x54, 0x00050400}, {0x55, 0x0004026e}, {0xdd, 0x0000004c}, {0x70, 0x00067435}, /* * 0x71 has same package type condition as for register 0x51 */ {0x71, 0x0006b04e}, {0x72, 0x000007d2}, {0x73, 0x00000000}, {0x74, 0x00050400}, {0x75, 0x0004026e}, {0xef, 0x00000100}, {0x34, 0x0000add7}, {0x35, 0x00005c00}, {0x34, 0x00009dd4}, {0x35, 0x00005000}, {0x34, 0x00008dd1}, {0x35, 0x00004400}, {0x34, 0x00007dce}, {0x35, 0x00003800}, {0x34, 0x00006cd1}, {0x35, 0x00004400}, {0x34, 0x00005cce}, {0x35, 0x00003800}, {0x34, 0x000048ce}, {0x35, 0x00004400}, {0x34, 0x000034ce}, {0x35, 0x00003800}, {0x34, 0x00002451}, {0x35, 0x00004400}, {0x34, 0x0000144e}, {0x35, 0x00003800}, {0x34, 0x00000051}, {0x35, 0x00004400}, {0xef, 0x00000000}, {0xef, 0x00000100}, {0xed, 0x00000010}, {0x44, 0x0000add7}, {0x44, 0x00009dd4}, {0x44, 0x00008dd1}, {0x44, 0x00007dce}, {0x44, 0x00006cc1}, {0x44, 0x00005cce}, {0x44, 0x000044d1}, {0x44, 0x000034ce}, {0x44, 0x00002451}, {0x44, 0x0000144e}, {0x44, 0x00000051}, {0xef, 0x00000000}, {0xed, 0x00000000}, {0x7f, 0x00020080}, {0xef, 0x00002000}, {0x3b, 0x000380ef}, {0x3b, 0x000302fe}, {0x3b, 0x00028ce6}, {0x3b, 0x000200bc}, {0x3b, 0x000188a5}, {0x3b, 0x00010fbc}, {0x3b, 0x00008f71}, {0x3b, 0x00000900}, {0xef, 0x00000000}, {0xed, 0x00000001}, {0x40, 0x000380ef}, {0x40, 0x000302fe}, {0x40, 0x00028ce6}, {0x40, 0x000200bc}, {0x40, 0x000188a5}, {0x40, 0x00010fbc}, {0x40, 0x00008f71}, {0x40, 0x00000900}, {0xed, 0x00000000}, {0x82, 0x00080000}, {0x83, 0x00008000}, {0x84, 0x00048d80}, {0x85, 0x00068000}, {0xa2, 0x00080000}, {0xa3, 0x00008000}, {0xa4, 0x00048d80}, {0xa5, 0x00068000}, {0xed, 0x00000002}, {0xef, 0x00000002}, {0x56, 0x00000032}, {0x76, 0x00000032}, {0x01, 0x00000780}, {0xff, 0xffffffff} }; static int rtl8723bu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u32 val32, sys_cfg, vendor; int ret = 0; sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK); if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); ret = -ENOTSUPP; goto out; } strscpy(priv->chip_name, "8723BU", sizeof(priv->chip_name)); priv->rtl_chip = RTL8723B; priv->rf_paths = 1; priv->rx_paths = 1; priv->tx_paths = 1; val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL); if (val32 & MULTI_WIFI_FUNC_EN) priv->has_wifi = 1; if (val32 & MULTI_BT_FUNC_EN) priv->has_bluetooth = 1; if (val32 & MULTI_GPS_FUNC_EN) priv->has_gps = 1; priv->is_multi_func = 1; vendor = sys_cfg & SYS_CFG_VENDOR_EXT_MASK; rtl8xxxu_identify_vendor_2bits(priv, vendor); val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS); priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID); rtl8xxxu_config_endpoints_sie(priv); /* * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX */ if (!priv->ep_tx_count) ret = rtl8xxxu_config_endpoints_no_sie(priv); out: return ret; } static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data) { struct h2c_cmd h2c; int reqnum = 0; memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER; h2c.bt_mp_oper.operreq = 0 | (reqnum << 4); h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE; h2c.bt_mp_oper.data = data; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper)); reqnum++; memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER; h2c.bt_mp_oper.operreq = 0 | (reqnum << 4); h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE; h2c.bt_mp_oper.addr = reg; rtl8xxxu_gen2_h2c_cmd(priv, 
&h2c, sizeof(h2c.bt_mp_oper)); } static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv) { u8 val8; u16 sys_func; val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); val8 &= ~BIT(1); rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); sys_func &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); val8 &= ~BIT(1); rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); sys_func |= SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); } static void rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { u32 val32, ofdm, mcs; u8 cck, ofdmbase, mcsbase; int group, tx_idx; tx_idx = 0; group = rtl8xxxu_gen2_channel_to_group(channel); cck = priv->cck_tx_power_index_B[group]; val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); val32 &= 0xffff00ff; val32 |= (cck << 8); rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32); val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); val32 &= 0xff; val32 |= ((cck << 8) | (cck << 16) | (cck << 24)); rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); ofdmbase = priv->ht40_1s_tx_power_index_B[group]; ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b; ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm); rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm); mcsbase = priv->ht40_1s_tx_power_index_B[group]; if (ht40) mcsbase += priv->ht40_tx_power_diff[tx_idx++].b; else mcsbase += priv->ht20_tx_power_diff[tx_idx++].b; mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs); rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs); } static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu; int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, sizeof(efuse->tx_power_index_A.cck_base)); memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base, sizeof(efuse->tx_power_index_B.cck_base)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); memcpy(priv->ht40_1s_tx_power_index_B, efuse->tx_power_index_B.ht40_base, sizeof(efuse->tx_power_index_B.ht40_base)); priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; priv->ofdm_tx_power_diff[0].b = efuse->tx_power_index_B.ht20_ofdm_1s_diff.a; priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; priv->ht20_tx_power_diff[0].b = efuse->tx_power_index_B.ht20_ofdm_1s_diff.b; priv->ht40_tx_power_diff[0].a = 0; priv->ht40_tx_power_diff[0].b = 0; for (i = 1; i < RTL8723B_TX_COUNT; i++) { priv->ofdm_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ofdm; priv->ofdm_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ofdm; priv->ht20_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ht20; priv->ht20_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ht20; priv->ht40_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ht40; priv->ht40_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 
1].ht40; } priv->default_crystal_cap = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f; return 0; } static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv) { const char *fw_name; int ret; if (priv->enable_bluetooth) fw_name = "rtlwifi/rtl8723bu_bt.bin"; else fw_name = "rtlwifi/rtl8723bu_nic.bin"; ret = rtl8xxxu_load_firmware(priv, fw_name); return ret; } static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); /* 6. 0x1f[7:0] = 0x07 */ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); /* Why? */ rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); } static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv) { int ret; ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A); /* * PHY LCK */ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01); msleep(200); rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); return ret; } void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) { u32 val32; val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1); val32 &= ~(BIT(20) | BIT(24)); rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32); val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 &= ~BIT(4); rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 |= BIT(3); rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 |= BIT(24); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 &= ~BIT(23); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 |= (BIT(0) | BIT(1)); rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC); val32 &= 0xffffff00; val32 |= 0x77; rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32); val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write32(priv, REG_PWR_DATA, val32); } static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_eac, reg_e94, reg_e9c, path_sel, val32; int result = 0; path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87); /* * Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000); rtl8xxxu_write32(priv, 
REG_TX_IQK_PI_B, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * The vendor driver indicates the USB module is always using * S0S1 path 1 for the 8723bu. This may be different for 8192eu */ if (priv->rf_paths > 1) rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); else rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); /* * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu. * No trace of this in the 8192eu or 8188eu vendor drivers. */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(1); /* Restore Ant Path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); #ifdef RTL8723BU_BT /* GNT_BT = 1 */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); #endif /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); val32 = (reg_e9c >> 16) & 0x3ff; if (val32 & 0x200) val32 = 0x400 - val32; if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000) && ((reg_e94 & 0x03ff0000) < 0x01100000) && ((reg_e94 & 0x03ff0000) > 0x00f00000) && val32 < 0xf) result |= 0x01; else /* If TX not OK, ignore RX */ goto out; out: return result; } static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32; int result = 0; path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7); /* * Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * The vendor driver indicates the USB module is always using * S0S1 path 1 for the 8723bu. 
This may be different for 8192eu */ if (priv->rf_paths > 1) rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); else rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); /* * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu. * No trace of this in the 8192eu or 8188eu vendor drivers. */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(1); /* Restore Ant Path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); #ifdef RTL8723BU_BT /* GNT_BT = 1 */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); #endif /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); val32 = (reg_e9c >> 16) & 0x3ff; if (val32 & 0x200) val32 = 0x400 - val32; if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000) && ((reg_e94 & 0x03ff0000) < 0x01100000) && ((reg_e94 & 0x03ff0000) > 0x00f00000) && val32 < 0xf) result |= 0x01; else /* If TX not OK, ignore RX */ goto out; val32 = 0x80007c00 | (reg_e94 &0x3ff0000) | ((reg_e9c & 0x3ff0000) >> 16); rtl8xxxu_write32(priv, REG_TX_IQK, val32); /* * Modify RX IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77); /* * PA, PAD setting */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0xf80); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f); /* * RX IQK setting */ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); if (priv->rf_paths > 1) rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); else rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); /* * Disable BT */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(1); /* Restore Ant Path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); #ifdef RTL8723BU_BT /* GNT_BT = 1 */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); #endif /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, 
REG_FPGA0_IQK, val32); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x780); val32 = (reg_eac >> 16) & 0x3ff; if (val32 & 0x200) val32 = 0x400 - val32; if (!(reg_eac & BIT(27)) && ((reg_ea4 & 0x03ff0000) != 0x01320000) && ((reg_eac & 0x03ff0000) != 0x00360000) && ((reg_ea4 & 0x03ff0000) < 0x01100000) && ((reg_ea4 & 0x03ff0000) > 0x00f00000) && val32 < 0xf) result |= 0x02; else /* If TX not OK, ignore RX */ goto out; out: return result; } static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { struct device *dev = &priv->udev->dev; u32 i, val32; int path_a_ok /*, path_b_ok */; int retry = 2; static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, REG_RX_WAIT_CCA, REG_TX_CCK_RFON, REG_TX_CCK_BBON, REG_TX_OFDM_RFON, REG_TX_OFDM_BBON, REG_TX_TO_RX, REG_TX_TO_TX, REG_RX_CCK, REG_RX_OFDM, REG_RX_WAIT_RIFS, REG_RX_TO_RX, REG_STANDBY, REG_SLEEP, REG_PMPD_ANAEN }; static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { REG_TXPAUSE, REG_BEACON_CTRL, REG_BEACON_CTRL_1, REG_GPIO_MUXCFG }; static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE }; u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff; u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff; /* * Note: IQ calibration must be performed after loading * PHY_REG.txt , and radio_a, radio_b.txt */ if (t == 0) { /* Save ADDA parameters, turn Path A ADDA on */ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); rtl8xxxu_save_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); } rtl8xxxu_path_adda_on(priv, adda_regs, true); /* MAC settings */ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup); val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); val32 |= 0x0f000000; rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000); /* * RX IQ calibration setting for 8723B D cut large current issue * when leaving IPS */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED); val32 |= 0x20; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd); for (i = 0; i < retry; i++) { path_a_ok = rtl8723bu_iqk_path_a(priv); if (path_a_ok == 0x01) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); result[t][0] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); result[t][1] = (val32 >> 16) & 0x3ff; 
break; } } if (!path_a_ok) dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__); for (i = 0; i < retry; i++) { path_a_ok = rtl8723bu_rx_iqk_path_a(priv); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); result[t][2] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); result[t][3] = (val32 >> 16) & 0x3ff; break; } } if (!path_a_ok) dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__); if (priv->tx_paths > 1) { #if 1 dev_warn(dev, "%s: Path B not supported\n", __func__); #else /* * Path A into standby */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000); val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Turn Path B ADDA on */ rtl8xxxu_path_adda_on(priv, adda_regs, false); for (i = 0; i < retry; i++) { path_b_ok = rtl8xxxu_iqk_path_b(priv); if (path_b_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); result[t][4] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); result[t][5] = (val32 >> 16) & 0x3ff; break; } } if (!path_b_ok) dev_dbg(dev, "%s: Path B IQK failed!\n", __func__); for (i = 0; i < retry; i++) { path_b_ok = rtl8723bu_rx_iqk_path_b(priv); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); result[t][6] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); result[t][7] = (val32 >> 16) & 0x3ff; break; } } if (!path_b_ok) dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__); #endif } /* Back to BB mode, load original value */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); if (t) { /* Reload ADDA power saving parameters */ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); /* Reload MAC parameters */ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); /* Reload BB parameters */ rtl8xxxu_restore_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); /* Restore RX initial gain */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); val32 &= 0xffffff00; rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc); if (priv->tx_paths > 1) { val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1); val32 &= 0xffffff00; rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, val32 | 0x50); rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, val32 | xb_agc); } /* Load 0xe30 IQC default value */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); } } static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; int result[4][8]; /* last is final result */ int i, candidate; bool path_a_ok, path_b_ok; u32 reg_e94, reg_e9c, reg_ea4, reg_eac; u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc; u32 val32, bt_control; s32 reg_tmp = 0; bool simu; rtl8xxxu_gen2_prepare_calibrate(priv, 1); memset(result, 0, sizeof(result)); candidate = -1; path_a_ok = false; path_b_ok = false; bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU); for (i = 0; i < 3; i++) { rtl8723bu_phy_iqcalibrate(priv, result, i); if (i == 1) { simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 1); if (simu) { candidate = 0; break; } } if (i == 2) { simu = 
rtl8xxxu_gen2_simularity_compare(priv, result, 0, 2); if (simu) { candidate = 0; break; } simu = rtl8xxxu_gen2_simularity_compare(priv, result, 1, 2); if (simu) { candidate = 1; } else { for (i = 0; i < 8; i++) reg_tmp += result[3][i]; if (reg_tmp) candidate = 3; else candidate = -1; } } } for (i = 0; i < 4; i++) { reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; reg_ec4 = result[i][6]; reg_ecc = result[i][7]; } if (candidate >= 0) { reg_e94 = result[candidate][0]; priv->rege94 = reg_e94; reg_e9c = result[candidate][1]; priv->rege9c = reg_e9c; reg_ea4 = result[candidate][2]; reg_eac = result[candidate][3]; reg_eb4 = result[candidate][4]; priv->regeb4 = reg_eb4; reg_ebc = result[candidate][5]; priv->regebc = reg_ebc; reg_ec4 = result[candidate][6]; reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; } else { reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100; reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0; } if (reg_e94 && candidate >= 0) rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, candidate, (reg_ea4 == 0)); if (priv->tx_paths > 1 && reg_eb4) rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, candidate, (reg_ec4 == 0)); rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED); val32 |= 0x20; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd); if (priv->rf_paths > 1) dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__); rtl8xxxu_gen2_prepare_calibrate(priv, 0); } static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; u32 val32; int count, ret = 0; /* Turn off RF */ rtl8xxxu_write8(priv, REG_RF_CTRL, 0); /* Enable rising edge triggering interrupt */ val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM); val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ; rtl8xxxu_write16(priv, REG_GPIO_INTM, val16); /* Release WLON reset 0x04[16]= 1*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_WLON_RESET; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* 0x0005[1] = 1 turn off MAC by HW state machine*/ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); if ((val8 & BIT(1)) == 0) break; udelay(10); } if (!count) { dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n", __func__); ret = -EBUSY; goto exit; } /* Enable BT control XTAL setting */ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC); val8 &= ~AFE_MISC_WL_XTAL_CTRL; rtl8xxxu_write8(priv, REG_AFE_MISC, val8); /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); val8 |= SYS_ISO_ANALOG_IPS; rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); 
/* 0x0020[0] = 0 disable LDOA12 MACRO block*/ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); val8 &= ~LDOA15_ENABLE; rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); exit: return ret; } static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; int count, ret = 0; /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface */ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); val8 |= LDOA15_ENABLE; rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/ val8 = rtl8xxxu_read8(priv, 0x0067); val8 &= ~BIT(4); rtl8xxxu_write8(priv, 0x0067, val8); mdelay(1); /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); val8 &= ~SYS_ISO_ANALOG_IPS; rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); /* Disable SW LPS 0x04[10]= 0 */ val32 = rtl8xxxu_read8(priv, REG_APS_FSMCO); val32 &= ~APS_FSMCO_SW_LPS; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Wait until 0x04[17] = 1 power ready */ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if (val32 & BIT(17)) break; udelay(10); } if (!count) { ret = -EBUSY; goto exit; } /* We should be able to optimize the following three entries into one */ /* Release WLON reset 0x04[16]= 1*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_WLON_RESET; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Disable HWPDN 0x04[15]= 0*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 &= ~APS_FSMCO_HW_POWERDOWN; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Disable WL suspend*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE); rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Set, then poll until 0 */ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_MAC_ENABLE; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) { ret = 0; break; } udelay(10); } if (!count) { ret = -EBUSY; goto exit; } /* Enable WL control XTAL setting */ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC); val8 |= AFE_MISC_WL_XTAL_CTRL; rtl8xxxu_write8(priv, REG_AFE_MISC, val8); /* Enable falling edge triggering interrupt */ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8); /* Enable GPIO9 interrupt mode */ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8); /* Enable GPIO9 input mode */ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2); val8 &= ~BIT(1); rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8); /* Enable HSISR GPIO[C:0] interrupt */ val8 = rtl8xxxu_read8(priv, REG_HSIMR); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_HSIMR, val8); /* Enable HSISR GPIO9 interrupt */ val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_HSIMR + 2, val8); val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL); val8 |= MULTI_WIFI_HW_ROF_EN; rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8); /* For GPIO9 internal pull high setting BIT(14) */ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1); val8 |= BIT(6); rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8); exit: return ret; } static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; u32 val32; int ret; rtl8xxxu_disabled_to_emu(priv); ret = rtl8723b_emu_to_active(priv); if (ret) goto exit; /* * Enable MAC DMA/WMAC/SCHEDULE/SEC block * Set CR bit10 to 
enable 32k calibration. */ val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); /* * BT coexist power on settings. This is identical for 1 and 2 * antenna parts. */ rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20); val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18); rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); /* Antenna inverse */ rtl8xxxu_write8(priv, 0xfe08, 0x01); val16 = rtl8xxxu_read16(priv, REG_PWR_DATA); val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write16(priv, REG_PWR_DATA, val16); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 |= LEDCFG0_DPDT_SELECT; rtl8xxxu_write32(priv, REG_LEDCFG0, val32); val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA; rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); exit: return ret; } static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; rtl8xxxu_flush_fifo(priv); /* * Disable TX report timer */ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE; rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); rtl8xxxu_write8(priv, REG_CR, 0x0000); rtl8xxxu_active_to_lps(priv); /* Reset Firmware if running in RAM */ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL) rtl8xxxu_firmware_self_reset(priv); /* Reset MCU */ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); /* Reset MCU ready status */ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); rtl8723bu_active_to_emu(priv); val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8); } static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) { struct h2c_cmd h2c; u32 val32; u8 val8; val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); val32 |= (BIT(22) | BIT(23)); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); /* * No indication anywhere as to what 0x0790 does. The 2 antenna * vendor code preserves bits 6-7 here. 
*/ rtl8xxxu_write8(priv, 0x0790, 0x05); /* * 0x0778 seems to be related to enabling the number of antennas * In the vendor driver halbtc8723b2ant_InitHwConfig() sets it * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01 */ rtl8xxxu_write8(priv, 0x0778, 0x01); val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); val8 |= BIT(5); rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780); rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */ /* * Set BT grant to low */ memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_grant.cmd = H2C_8723B_BT_GRANT; h2c.bt_grant.data = 0; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant)); /* * WLAN action by PTA */ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c); /* * BT select S0/S1 controlled by WiFi */ val8 = rtl8xxxu_read8(priv, 0x0067); val8 |= BIT(5); rtl8xxxu_write8(priv, 0x0067, val8); val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write32(priv, REG_PWR_DATA, val32); /* * Bits 6/7 are marked in/out ... but for what? */ rtl8xxxu_write8(priv, 0x0974, 0xff); val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 |= (BIT(0) | BIT(1)); rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 &= ~BIT(24); val32 |= BIT(23); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); /* * Fix external switch Main->S1, Aux->S0 */ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV; h2c.ant_sel_rsv.ant_inverse = 1; h2c.ant_sel_rsv.int_switch_type = 0; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv)); /* * Different settings per different antenna position. 
* Antenna Position: | Normal Inverse * -------------------------------------------------- * Antenna switch to BT: | 0x280, 0x00 * Antenna switch to WiFi: | 0x0, 0x280 * Antenna switch to PTA: | 0x200, 0x80 */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x80); /* * Software control, antenna at WiFi side */ rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_info.cmd = H2C_8723B_BT_INFO; h2c.bt_info.data = BIT(0); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info)); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; h2c.ignore_wlan.data = 0; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); } static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv) { u32 agg_rx; u8 agg_ctrl; /* * For now simply disable RX aggregation */ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH); agg_rx &= ~RXDMA_USB_AGG_ENABLE; agg_rx &= ~0xff0f; rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); } static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv) { u32 val32; /* Time duration for NHM unit: 4us, 0x2710=40ms */ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710); rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff); rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52); rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff); /* TH8 */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 |= 0xff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Enable CCK */ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B); val32 |= BIT(8) | BIT(9) | BIT(10); rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32); /* Max power amongst all RX antennas */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC); val32 |= BIT(7); rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_stats *phy_stats) { u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a; s8 rx_pwr_all = 0x00; u8 vga_idx, lna_idx; lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK); vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK); switch (lna_idx) { case 6: rx_pwr_all = -34 - (2 * vga_idx); break; case 4: rx_pwr_all = -14 - (2 * vga_idx); break; case 1: rx_pwr_all = 6 - (2 * vga_idx); break; case 0: rx_pwr_all = 16 - (2 * vga_idx); break; default: break; } return rx_pwr_all; } static int rtl8723bu_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rtl8xxxu_priv *priv = container_of(led_cdev, struct rtl8xxxu_priv, led_cdev); u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2); ledcfg &= LEDCFG2_DPDT_SELECT; if (brightness == LED_OFF) ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE; else if (brightness == LED_ON) ledcfg |= LEDCFG2_SW_LED_CONTROL; else if (brightness == RTL8XXXU_HW_LED_CONTROL) ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE; rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg); return 0; } struct rtl8xxxu_fileops rtl8723bu_fops = { .identify_chip = rtl8723bu_identify_chip, .parse_efuse = rtl8723bu_parse_efuse, .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723bu_power_on, .power_off = 
rtl8723bu_power_off,
	.read_efuse = rtl8xxxu_read_efuse,
	.reset_8051 = rtl8723bu_reset_8051,
	.llt_init = rtl8xxxu_auto_llt_table,
	.init_phy_bb = rtl8723bu_init_phy_bb,
	.init_phy_rf = rtl8723bu_init_phy_rf,
	.phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
	.phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
	.phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
	.config_channel = rtl8xxxu_gen2_config_channel,
	.parse_rx_desc = rtl8xxxu_parse_rxdesc24,
	.parse_phystats = rtl8723au_rx_parse_phystats,
	.init_aggregation = rtl8723bu_init_aggregation,
	.init_statistics = rtl8723bu_init_statistics,
	.init_burst = rtl8xxxu_init_burst,
	.enable_rf = rtl8723b_enable_rf,
	.disable_rf = rtl8xxxu_gen2_disable_rf,
	.usb_quirks = rtl8xxxu_gen2_usb_quirks,
	.set_tx_power = rtl8723b_set_tx_power,
	.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
	.report_connect = rtl8xxxu_gen2_report_connect,
	.report_rssi = rtl8xxxu_gen2_report_rssi,
	.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
	.set_crystal_cap = rtl8723a_set_crystal_cap,
	.cck_rssi = rtl8723b_cck_rssi,
	.led_classdev_brightness_set = rtl8723bu_led_brightness_set,
	.writeN_block_size = 1024,
	.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
	.has_s0s1 = 1,
	.has_tx_report = 1,
	.gen2_thermal_meter = 1,
	.needs_full_init = 1,
	.init_reg_hmtfr = 1,
	.ampdu_max_time = 0x5e,
	.ustime_tsf_edca = 0x50,
	.max_aggr_num = 0x0c14,
	.supports_ap = 1,
	.max_macid_num = 128,
	.max_sec_cam_num = 64,
	.adda_1t_init = 0x01c00014,
	.adda_1t_path_on = 0x01c00014,
	.adda_2t_path_on_a = 0x01c00014,
	.adda_2t_path_on_b = 0x01c00014,
	.trxff_boundary = 0x3f7f,
	.pbp_rx = PBP_PAGE_SIZE_256,
	.pbp_tx = PBP_PAGE_SIZE_256,
	.mactable = rtl8723b_mac_init_table,
	.total_page_num = TX_TOTAL_PAGE_NUM_8723B,
	.page_num_hi = TX_PAGE_NUM_HI_PQ_8723B,
	.page_num_lo = TX_PAGE_NUM_LO_PQ_8723B,
	.page_num_norm = TX_PAGE_NUM_NORM_PQ_8723B,
};
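/*
 * Added commentary: rtl8723bu_fops is the 8723BU "personality" handed to the
 * shared rtl8xxxu core. Once the chip has been identified, the core drives
 * the device through the callbacks and per-chip constants collected above.
 */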
// SPDX-License-Identifier: GPL-2.0-only
/*
 * scsi.c Copyright (C) 1992 Drew Eckhardt
 *        Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *        Copyright (C) 2002, 2003 Christoph Hellwig
 *
 * generic mid-level SCSI driver
 * Initial versions: Drew Eckhardt
 * Subsequent revisions: Eric Youngdale
 *
 * <drew@colorado.edu>
 *
 * Bug correction thanks go to :
 * Rik Faith <faith@cs.unc.edu>
 * Tommy Thorn <tthorn>
 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 * add scatter-gather, multiple outstanding request, and other
 * enhancements.
 *
 * Native multichannel, wide scsi, /proc/scsi and hot plugging
 * support added by Michael Neuffer <mike@i-connect.net>
 *
 * Added request_module("scsi_hostadapter") for kerneld:
 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 * Bjorn Ekwall <bj0rn@blox.se>
 * (changed to kmod)
 *
 * Major improvements to the timeout, abort, and reset processing,
 * as well as performance modifications for large queue depths by
 * Leonard N. Zubkoff <lnz@dandelion.com>
 *
 * Converted cli() code to spinlocks, Ingo Molnar
 *
 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 * out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
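 * The level is also exposed as the scsi_logging_level module parameter,
 * declared writable via the module_param() call at the bottom of this file.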
*/ unsigned int scsi_logging_level; #if defined(CONFIG_SCSI_LOGGING) EXPORT_SYMBOL(scsi_logging_level); #endif #ifdef CONFIG_SCSI_LOGGING void scsi_log_send(struct scsi_cmnd *cmd) { unsigned int level; /* * If ML QUEUE log level is greater than or equal to: * * 1: nothing (match completion) * * 2: log opcode + command of all commands + cmd address * * 3: same as 2 * * 4: same as 3 */ if (unlikely(scsi_logging_level)) { level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS); if (level > 1) { scmd_printk(KERN_INFO, cmd, "Send: scmd 0x%p\n", cmd); scsi_print_command(cmd); } } } void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) { unsigned int level; /* * If ML COMPLETE log level is greater than or equal to: * * 1: log disposition, result, opcode + command, and conditionally * sense data for failures or non SUCCESS dispositions. * * 2: same as 1 but for all command completions. * * 3: same as 2 * * 4: same as 3 plus dump extra junk */ if (unlikely(scsi_logging_level)) { level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, SCSI_LOG_MLCOMPLETE_BITS); if (((level > 0) && (cmd->result || disposition != SUCCESS)) || (level > 1)) { scsi_print_result(cmd, "Done", disposition); scsi_print_command(cmd); if (scsi_status_is_check_condition(cmd->result)) scsi_print_sense(cmd); if (level > 3) scmd_printk(KERN_INFO, cmd, "scsi host busy %d failed %d\n", scsi_host_busy(cmd->device->host), cmd->device->host->host_failed); } } } #endif /** * scsi_finish_command - cleanup and pass command back to upper layer * @cmd: the command * * Description: Pass command off to upper layer for finishing of I/O * request, waking processes that are waiting on results, * etc. */ void scsi_finish_command(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct scsi_target *starget = scsi_target(sdev); struct Scsi_Host *shost = sdev->host; struct scsi_driver *drv; unsigned int good_bytes; scsi_device_unbusy(sdev, cmd); /* * Clear the flags that say that the device/target/host is no longer * capable of accepting new commands. */ if (atomic_read(&shost->host_blocked)) atomic_set(&shost->host_blocked, 0); if (atomic_read(&starget->target_blocked)) atomic_set(&starget->target_blocked, 0); if (atomic_read(&sdev->device_blocked)) atomic_set(&sdev->device_blocked, 0); SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, "Notifying upper driver of completion " "(result %x)\n", cmd->result)); good_bytes = scsi_bufflen(cmd); if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { int old_good_bytes = good_bytes; drv = scsi_cmd_to_driver(cmd); if (drv->done) good_bytes = drv->done(cmd); /* * USB may not give sense identifying bad sector and * simply return a residue instead, so subtract off the * residue if drv->done() error processing indicates no * change to the completion length. */ if (good_bytes == old_good_bytes) good_bytes -= scsi_get_resid(cmd); } scsi_io_completion(cmd, good_bytes); } /* * 4096 is big enough for saturating fast SCSI LUNs. */ int scsi_device_max_queue_depth(struct scsi_device *sdev) { return min_t(int, sdev->host->can_queue, 4096); } /** * scsi_change_queue_depth - change a device's queue depth * @sdev: SCSI Device in question * @depth: number of commands allowed to be queued to the driver * * Sets the device queue depth and returns the new value. 
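 * The requested depth is clamped to scsi_device_max_queue_depth() before it
 * is applied; a value of zero or less leaves sdev->queue_depth unchanged.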
*/ int scsi_change_queue_depth(struct scsi_device *sdev, int depth) { if (!sdev->budget_map.map) return -EINVAL; depth = min_t(int, depth, scsi_device_max_queue_depth(sdev)); if (depth > 0) { sdev->queue_depth = depth; wmb(); } if (sdev->request_queue) blk_set_queue_depth(sdev->request_queue, depth); sbitmap_resize(&sdev->budget_map, sdev->queue_depth); return sdev->queue_depth; } EXPORT_SYMBOL(scsi_change_queue_depth); /** * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth * @sdev: SCSI Device in question * @depth: Current number of outstanding SCSI commands on this device, * not counting the one returned as QUEUE_FULL. * * Description: This function will track successive QUEUE_FULL events on a * specific SCSI device to determine if and when there is a * need to adjust the queue depth on the device. * * Returns: * * 0 - No change needed * * >0 - Adjust queue depth to this new depth, * * -1 - Drop back to untagged operation using host->cmd_per_lun as the * untagged command depth * * Lock Status: None held on entry * * Notes: Low level drivers may call this at any time and we will do * "The Right Thing." We are interrupt context safe. */ int scsi_track_queue_full(struct scsi_device *sdev, int depth) { if (!sdev->budget_map.map) return 0; /* * Don't let QUEUE_FULLs on the same * jiffies count, they could all be from * same event. */ if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) return 0; sdev->last_queue_full_time = jiffies; if (sdev->last_queue_full_depth != depth) { sdev->last_queue_full_count = 1; sdev->last_queue_full_depth = depth; } else { sdev->last_queue_full_count++; } if (sdev->last_queue_full_count <= 10) return 0; return scsi_change_queue_depth(sdev, depth); } EXPORT_SYMBOL(scsi_track_queue_full); /** * scsi_vpd_inquiry - Request a device provide us with a VPD page * @sdev: The device to ask * @buffer: Where to put the result * @page: Which Vital Product Data to return * @len: The length of the buffer * * This is an internal helper function. You probably want to use * scsi_get_vpd_page instead. * * Returns size of the vpd page on success or a negative error number. */ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, u8 page, unsigned len) { int result; unsigned char cmd[16]; if (len < 4) return -EINVAL; cmd[0] = INQUIRY; cmd[1] = 1; /* EVPD */ cmd[2] = page; cmd[3] = len >> 8; cmd[4] = len & 0xff; cmd[5] = 0; /* Control byte */ /* * I'm not convinced we need to try quite this hard to get VPD, but * all the existing users tried this hard. */ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, 30 * HZ, 3, NULL); if (result) return -EIO; /* * Sanity check that we got the page back that we asked for and that * the page size is not 0. */ if (buffer[1] != page) return -EIO; result = get_unaligned_be16(&buffer[2]); if (!result) return -EIO; return result + 4; } enum scsi_vpd_parameters { SCSI_VPD_HEADER_SIZE = 4, SCSI_VPD_LIST_SIZE = 36, }; static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) { unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4); int result; if (sdev->no_vpd_size) return SCSI_DEFAULT_VPD_LEN; /* * Fetch the supported pages VPD and validate that the requested page * number is present. 
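 * (Page 0 itself is exempt from this check, since it is the list being
 * consulted; see the page != 0 test below.)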
*/ if (page != 0) { result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd)); if (result < SCSI_VPD_HEADER_SIZE) return 0; if (result > sizeof(vpd)) { dev_warn_once(&sdev->sdev_gendev, "%s: long VPD page 0 length: %d bytes\n", __func__, result); result = sizeof(vpd); } result -= SCSI_VPD_HEADER_SIZE; if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) return 0; } /* * Fetch the VPD page header to find out how big the page * is. This is done to prevent problems on legacy devices * which can not handle allocation lengths as large as * potentially requested by the caller. */ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE); if (result < 0) return 0; if (result < SCSI_VPD_HEADER_SIZE) { dev_warn_once(&sdev->sdev_gendev, "%s: short VPD page 0x%02x length: %d bytes\n", __func__, page, result); return 0; } return result; } /** * scsi_get_vpd_page - Get Vital Product Data from a SCSI device * @sdev: The device to ask * @page: Which Vital Product Data to return * @buf: where to store the VPD * @buf_len: number of bytes in the VPD buffer area * * SCSI devices may optionally supply Vital Product Data. Each 'page' * of VPD is defined in the appropriate SCSI document (eg SPC, SBC). * If the device supports this VPD page, this routine fills @buf * with the data from that page and return 0. If the VPD page is not * supported or its content cannot be retrieved, -EINVAL is returned. */ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, int buf_len) { int result, vpd_len; if (!scsi_device_supports_vpd(sdev)) return -EINVAL; vpd_len = scsi_get_vpd_size(sdev, page); if (vpd_len <= 0) return -EINVAL; vpd_len = min(vpd_len, buf_len); /* * Fetch the actual page. Since the appropriate size was reported * by the device it is now safe to ask for something bigger. */ memset(buf, 0, buf_len); result = scsi_vpd_inquiry(sdev, buf, page, vpd_len); if (result < 0) return -EINVAL; else if (result > vpd_len) dev_warn_once(&sdev->sdev_gendev, "%s: VPD page 0x%02x result %d > %d bytes\n", __func__, page, result, vpd_len); return 0; } EXPORT_SYMBOL_GPL(scsi_get_vpd_page); /** * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device * @sdev: The device to ask * @page: Which Vital Product Data to return * * Returns %NULL upon failure. */ static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page) { struct scsi_vpd *vpd_buf; int vpd_len, result; vpd_len = scsi_get_vpd_size(sdev, page); if (vpd_len <= 0) return NULL; retry_pg: /* * Fetch the actual page. Since the appropriate size was reported * by the device it is now safe to ask for something bigger. 
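 * Should the device return even more data than it first advertised, the
 * buffer is discarded and the page re-read with the larger length (the
 * retry_pg loop below).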
*/ vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL); if (!vpd_buf) return NULL; result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len); if (result < 0) { kfree(vpd_buf); return NULL; } if (result > vpd_len) { dev_warn_once(&sdev->sdev_gendev, "%s: VPD page 0x%02x result %d > %d bytes\n", __func__, page, result, vpd_len); vpd_len = result; kfree(vpd_buf); goto retry_pg; } vpd_buf->len = result; return vpd_buf; } static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page, struct scsi_vpd __rcu **sdev_vpd_buf) { struct scsi_vpd *vpd_buf; vpd_buf = scsi_get_vpd_buf(sdev, page); if (!vpd_buf) return; mutex_lock(&sdev->inquiry_mutex); vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf, lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); if (vpd_buf) kfree_rcu(vpd_buf, rcu); } /** * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure * @sdev: The device to ask * * Attach the 'Device Identification' VPD page (0x83) and the * 'Unit Serial Number' VPD page (0x80) to a SCSI device * structure. This information can be used to identify the device * uniquely. */ void scsi_attach_vpd(struct scsi_device *sdev) { int i; struct scsi_vpd *vpd_buf; if (!scsi_device_supports_vpd(sdev)) return; /* Ask for all the pages supported by this device */ vpd_buf = scsi_get_vpd_buf(sdev, 0); if (!vpd_buf) return; for (i = 4; i < vpd_buf->len; i++) { switch (vpd_buf->data[i]) { case 0x0: scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0); break; case 0x80: scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); break; case 0x83: scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); break; case 0x89: scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89); break; case 0xb0: scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0); break; case 0xb1: scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1); break; case 0xb2: scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2); break; case 0xb7: scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7); break; default: break; } } kfree(vpd_buf); } /** * scsi_report_opcode - Find out if a given command is supported * @sdev: scsi device to query * @buffer: scratch buffer (must be at least 20 bytes long) * @len: length of buffer * @opcode: opcode for the command to look up * @sa: service action for the command to look up * * Uses the REPORT SUPPORTED OPERATION CODES to check support for the * command identified with @opcode and @sa. If the command does not * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails, * 0 if the command is not supported and 1 if the device claims to * support the command. 
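 * Any other negative error returned by scsi_execute_cmd() is passed back to
 * the caller unchanged.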
*/ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, unsigned int len, unsigned char opcode, unsigned short sa) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; int result, request_len; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) return -EINVAL; /* RSOC header + size of command we are asking about */ request_len = 4 + COMMAND_SIZE(opcode); if (request_len > len) { dev_warn_once(&sdev->sdev_gendev, "%s: len %u bytes, opcode 0x%02x needs %u\n", __func__, len, opcode, request_len); return -EINVAL; } memset(cmd, 0, 16); cmd[0] = MAINTENANCE_IN; cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; if (!sa) { cmd[2] = 1; /* One command format */ cmd[3] = opcode; } else { cmd[2] = 3; /* One command format with service action */ cmd[3] = opcode; put_unaligned_be16(sa, &cmd[4]); } put_unaligned_be32(request_len, &cmd[6]); memset(buffer, 0, len); result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, request_len, 30 * HZ, 3, &exec_args); if (result < 0) return result; if (result && scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST && (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) return -EINVAL; if ((buffer[1] & 3) == 3) /* Command supported */ return 1; return 0; } EXPORT_SYMBOL(scsi_report_opcode); #define SCSI_CDL_CHECK_BUF_LEN 64 static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa, unsigned char *buf) { int ret; u8 cdlp; /* Check operation code */ ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa); if (ret <= 0) return false; if ((buf[1] & 0x03) != 0x03) return false; /* * See SPC-6, One_command parameter data format for * REPORT SUPPORTED OPERATION CODES. We have the following cases * depending on rwcdlp (buf[0] & 0x01) value: * - rwcdlp == 0: then cdlp indicates support for the A mode page when * it is equal to 1 and for the B mode page when it is * equal to 2. * - rwcdlp == 1: then cdlp indicates support for the T2A mode page * when it is equal to 1 and for the T2B mode page when * it is equal to 2. * Overall, to detect support for command duration limits, we only need * to check that cdlp is 1 or 2. */ cdlp = (buf[1] & 0x18) >> 3; return cdlp == 0x01 || cdlp == 0x02; } /** * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits * @sdev: The device to check */ void scsi_cdl_check(struct scsi_device *sdev) { bool cdl_supported; unsigned char *buf; /* * Support for CDL was defined in SPC-5. Ignore devices reporting an * lower SPC version. This also avoids problems with old drives choking * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a * service action specified, as done in scsi_cdl_check_cmd(). */ if (sdev->scsi_level < SCSI_SPC_5) { sdev->cdl_supported = 0; return; } buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL); if (!buf) { sdev->cdl_supported = 0; return; } /* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */ cdl_supported = scsi_cdl_check_cmd(sdev, READ_16, 0, buf) || scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) || scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) || scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf); if (cdl_supported) { /* * We have CDL support: force the use of READ16/WRITE16. * READ32 and WRITE32 will be used for devices that support * the T10_PI_TYPE2_PROTECTION protection type. 
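 * (Hence use_16_for_rw is forced on and use_10_for_rw cleared just below.)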
*/ sdev->use_16_for_rw = 1; sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; /* * If the device supports CDL, make sure that the current drive * feature status is consistent with the user controlled * cdl_enable state. */ scsi_cdl_enable(sdev, sdev->cdl_enable); } else { sdev->cdl_supported = 0; } kfree(buf); } /** * scsi_cdl_enable - Enable or disable a SCSI device supports for Command * Duration Limits * @sdev: The target device * @enable: the target state */ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) { char buf[64]; int ret; if (!sdev->cdl_supported) return -EOPNOTSUPP; /* * For ATA devices, CDL needs to be enabled with a SET FEATURES command. */ if (sdev->is_ata) { struct scsi_mode_data data; struct scsi_sense_hdr sshdr; char *buf_data; int len; ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf), 5 * HZ, 3, &data, NULL); if (ret) return -EINVAL; /* Enable or disable CDL using the ATA feature page */ len = min_t(size_t, sizeof(buf), data.length - data.header_length - data.block_descriptor_length); buf_data = buf + data.header_length + data.block_descriptor_length; /* * If we want to enable CDL and CDL is already enabled on the * device, do nothing. This avoids needlessly resetting the CDL * statistics on the device as that is implied by the CDL enable * action. Similar to this, there is no need to do anything if * we want to disable CDL and CDL is already disabled. */ if (enable) { if ((buf_data[4] & 0x03) == 0x02) goto out; buf_data[4] &= ~0x03; buf_data[4] |= 0x02; } else { if ((buf_data[4] & 0x03) == 0x00) goto out; buf_data[4] &= ~0x03; } ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, &data, &sshdr); if (ret) { if (ret > 0 && scsi_sense_valid(&sshdr)) scsi_print_sense_hdr(sdev, dev_name(&sdev->sdev_gendev), &sshdr); return ret; } } out: sdev->cdl_enable = enable; return 0; } /** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to * * Description: Gets a reference to the scsi_device and increments the use count * of the underlying LLDD module. You must hold host_lock of the * parent Scsi_Host or already have a reference when calling this. * * This will fail if a device is deleted or cancelled, or when the LLD module * is in the process of being unloaded. */ int scsi_device_get(struct scsi_device *sdev) { if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) goto fail; if (!try_module_get(sdev->host->hostt->module)) goto fail; if (!get_device(&sdev->sdev_gendev)) goto fail_put_module; return 0; fail_put_module: module_put(sdev->host->hostt->module); fail: return -ENXIO; } EXPORT_SYMBOL(scsi_device_get); /** * scsi_device_put - release a reference to a scsi_device * @sdev: device to release a reference on. * * Description: Release a reference to the scsi_device and decrements the use * count of the underlying LLDD module. The device is freed once the last * user vanishes. */ void scsi_device_put(struct scsi_device *sdev) { struct module *mod = sdev->host->hostt->module; put_device(&sdev->sdev_gendev); module_put(mod); } EXPORT_SYMBOL(scsi_device_put); /* helper for shost_for_each_device, see that for documentation */ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, struct scsi_device *prev) { struct list_head *list = (prev ? 
&prev->siblings : &shost->__devices); struct scsi_device *next = NULL; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); while (list->next != &shost->__devices) { next = list_entry(list->next, struct scsi_device, siblings); /* * Skip pseudo devices and also devices we can't get a * reference to. */ if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next)) break; next = NULL; list = list->next; } spin_unlock_irqrestore(shost->host_lock, flags); if (prev) scsi_device_put(prev); return next; } EXPORT_SYMBOL(__scsi_iterate_devices); /** * starget_for_each_device - helper to walk all devices of a target * @starget: target whose devices we want to iterate over. * @data: Opaque passed to each function call. * @fn: Function to call on each device * * This traverses over each device of @starget. The devices have * a reference that must be released by scsi_host_put when breaking * out of the loop. */ void starget_for_each_device(struct scsi_target *starget, void *data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct scsi_device *sdev; shost_for_each_device(sdev, shost) { if ((sdev->channel == starget->channel) && (sdev->id == starget->id)) fn(sdev, data); } } EXPORT_SYMBOL(starget_for_each_device); /** * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) * @starget: target whose devices we want to iterate over. * @data: parameter for callback @fn() * @fn: callback function that is invoked for each device * * This traverses over each device of @starget. It does _not_ * take a reference on the scsi_device, so the whole loop must be * protected by shost->host_lock. * * Note: The only reason why drivers would want to use this is because * they need to access the device list in irq context. Otherwise you * really want to use starget_for_each_device instead. **/ void __starget_for_each_device(struct scsi_target *starget, void *data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct scsi_device *sdev; __shost_for_each_device(sdev, shost) { if ((sdev->channel == starget->channel) && (sdev->id == starget->id)) fn(sdev, data); } } EXPORT_SYMBOL(__starget_for_each_device); /** * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @lun for a given * @starget. The returned scsi_device does not have an additional * reference. You must hold the host's host_lock over this call and * any access to the returned scsi_device. A scsi_device in state * SDEV_DEL is skipped. * * Note: The only reason why drivers should use this is because * they need to access the device list in irq context. Otherwise you * really want to use scsi_device_lookup_by_target instead. **/ struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, u64 lun) { struct scsi_device *sdev; list_for_each_entry(sdev, &starget->devices, same_target_siblings) { if (sdev->sdev_state == SDEV_DEL) continue; if (sdev->lun ==lun) return sdev; } return NULL; } EXPORT_SYMBOL(__scsi_device_lookup_by_target); /** * scsi_device_lookup_by_target - find a device given the target * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @lun for a given * @starget. 
The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. **/ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, u64 lun) { struct scsi_device *sdev; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); sdev = __scsi_device_lookup_by_target(starget, lun); if (sdev && scsi_device_get(sdev)) sdev = NULL; spin_unlock_irqrestore(shost->host_lock, flags); return sdev; } EXPORT_SYMBOL(scsi_device_lookup_by_target); /** * __scsi_device_lookup - find a device given the host (UNLOCKED) * @shost: SCSI host pointer * @channel: SCSI channel (zero if only one channel) * @id: SCSI target number (physical unit number) * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @channel, @id, @lun * for a given host. The returned scsi_device does not have an additional * reference. You must hold the host's host_lock over this call and any access * to the returned scsi_device. * * Note: The only reason why drivers would want to use this is because * they need to access the device list in irq context. Otherwise you * really want to use scsi_device_lookup instead. **/ struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct scsi_device *sdev; list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->sdev_state == SDEV_DEL) continue; if (sdev->channel == channel && sdev->id == id && sdev->lun ==lun) return sdev; } return NULL; } EXPORT_SYMBOL(__scsi_device_lookup); /** * scsi_device_lookup - find a device given the host * @shost: SCSI host pointer * @channel: SCSI channel (zero if only one channel) * @id: SCSI target number (physical unit number) * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @channel, @id, @lun * for a given host. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. 
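 * Returns %NULL if no matching device is found or a reference could not be
 * taken.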
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, u64 lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error;

	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);
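/*
 * Added commentary: the SCSI core hooks in at subsys_initcall() time so that
 * it is fully initialized before the host adapter and upper-level drivers,
 * which register via module_init()/device_initcall(), start attaching to it.
 */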
3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB Wacom tablet support - Wacom specific code
 */

#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
#include <linux/jiffies.h>

/* resolution for penabled devices */
#define WACOM_PL_RES		20
#define WACOM_PENPRTN_RES	40
#define WACOM_VOLITO_RES	50
#define WACOM_GRAPHIRE_RES	80
#define WACOM_INTUOS_RES	100
#define WACOM_INTUOS3_RES	200

/* Newer Cintiq and DTU have an offset between tablet and screen areas */
#define WACOM_DTU_OFFSET	200
#define WACOM_CINTIQ_OFFSET	400

/*
 * Scale factor relating reported contact size to logical contact area.
 * 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo
 */
#define WACOM_CONTACT_AREA_SCALE 2607

static bool touch_arbitration = 1;
module_param(touch_arbitration, bool, 0644);
MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");

static void wacom_report_numbered_buttons(struct input_dev *input_dev,
				int button_count, int mask);

static int wacom_numbered_button_to_key(int n);

static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
			     int group);

static void wacom_force_proxout(struct wacom_wac *wacom_wac)
{
	struct input_dev *input = wacom_wac->pen_input;

	wacom_wac->shared->stylus_in_proximity = 0;

	input_report_key(input, BTN_TOUCH, 0);
	input_report_key(input, BTN_STYLUS, 0);
	input_report_key(input, BTN_STYLUS2, 0);
	input_report_key(input, BTN_STYLUS3, 0);
	input_report_key(input, wacom_wac->tool[0], 0);
	if (wacom_wac->serial[0]) {
		input_report_abs(input, ABS_MISC, 0);
	}
	input_report_abs(input, ABS_PRESSURE, 0);

	wacom_wac->tool[0] = 0;
	wacom_wac->id[0] = 0;
	wacom_wac->serial[0] = 0;

	input_sync(input);
}

void wacom_idleprox_timeout(struct timer_list *list)
{
	struct wacom *wacom = timer_container_of(wacom, list, idleprox_timer);
	struct wacom_wac *wacom_wac = &wacom->wacom_wac;

	if (!wacom_wac->hid_data.sense_state) {
		return;
	}

	hid_warn(wacom->hdev, "%s: tool appears to be hung in-prox. forcing it out.\n", __func__);
	wacom_force_proxout(wacom_wac);
}

/*
 * Percent of battery capacity for Graphire.
 * 8th value means AC online and show 100% capacity.
 */
static unsigned short batcap_gr[8] = { 1, 15, 25, 35, 50, 70, 100, 100 };

/*
 * Percent of battery capacity for Intuos4 WL, AC has a separate bit.
 */
static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 };

static void __wacom_notify_battery(struct wacom_battery *battery,
				   int bat_status, int bat_capacity,
				   bool bat_charging, bool bat_connected,
				   bool ps_connected)
{
	bool changed = battery->bat_status != bat_status ||
		       battery->battery_capacity != bat_capacity ||
		       battery->bat_charging != bat_charging ||
		       battery->bat_connected != bat_connected ||
		       battery->ps_connected != ps_connected;

	if (changed) {
		battery->bat_status = bat_status;
		battery->battery_capacity = bat_capacity;
		battery->bat_charging = bat_charging;
		battery->bat_connected = bat_connected;
		battery->ps_connected = ps_connected;

		if (battery->battery)
			power_supply_changed(battery->battery);
	}
}

static void wacom_notify_battery(struct wacom_wac *wacom_wac,
	int bat_status, int bat_capacity, bool bat_charging,
	bool bat_connected, bool ps_connected)
{
	struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
	bool bat_initialized = wacom->battery.battery;
	bool has_quirk = wacom_wac->features.quirks & WACOM_QUIRK_BATTERY;

	if (bat_initialized != has_quirk)
		wacom_schedule_work(wacom_wac, WACOM_WORKER_BATTERY);

	__wacom_notify_battery(&wacom->battery, bat_status, bat_capacity,
			       bat_charging, bat_connected, ps_connected);
}

static int wacom_penpartner_irq(struct wacom_wac *wacom)
{
	unsigned char *data = wacom->data;
	struct input_dev *input = wacom->pen_input;

	switch (data[0]) {
	case 1:
		if (data[5] & 0x80) {
			wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
			wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
			input_report_key(input, wacom->tool[0], 1);
			input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
			input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
			input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
			input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
			input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -127));
			input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
		} else {
			input_report_key(input, wacom->tool[0], 0);
			input_report_abs(input, ABS_MISC, 0); /* report tool id */
			input_report_abs(input, ABS_PRESSURE, -1);
			input_report_key(input, BTN_TOUCH, 0);
		}
		break;

	case 2:
		input_report_key(input, BTN_TOOL_PEN, 1);
		input_report_abs(input, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
		input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
		input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
		input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
		input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
		input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
		break;

	default:
		dev_dbg(input->dev.parent,
			"%s: received unknown report #%d\n", __func__, data[0]);
		return 0;
	}

	return 1;
}

static int wacom_pl_irq(struct wacom_wac *wacom)
{
	struct wacom_features *features = &wacom->features;
	unsigned char *data = wacom->data;
	struct input_dev *input = wacom->pen_input;
	int prox, pressure;

	if (data[0] != WACOM_REPORT_PENABLED) {
		dev_dbg(input->dev.parent,
			"%s: received unknown report #%d\n", __func__, data[0]);
		return 0;
	}

	prox = data[1] & 0x40;

	if (!wacom->id[0]) {
		if ((data[0] & 0x10) || (data[4] & 0x20)) {
			wacom->tool[0] = BTN_TOOL_RUBBER;
			wacom->id[0] = ERASER_DEVICE_ID;
		} else {
			wacom->tool[0] = BTN_TOOL_PEN;
			wacom->id[0] = STYLUS_DEVICE_ID;
		}
	}

	/* If the eraser is in prox, STYLUS2 is always set. If we
	 * mis-detected the type and notice that STYLUS2 isn't set
	 * then force the eraser out of prox and let the pen in.
*/ if (wacom->tool[0] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) { input_report_key(input, BTN_TOOL_RUBBER, 0); input_report_abs(input, ABS_MISC, 0); input_sync(input); wacom->tool[0] = BTN_TOOL_PEN; wacom->id[0] = STYLUS_DEVICE_ID; } if (prox) { pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); if (features->pressure_max > 255) pressure = (pressure << 1) | ((data[4] >> 6) & 1); pressure += (features->pressure_max + 1) / 2; input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); input_report_abs(input, ABS_PRESSURE, pressure); input_report_key(input, BTN_TOUCH, data[4] & 0x08); input_report_key(input, BTN_STYLUS, data[4] & 0x10); /* Only allow the stylus2 button to be reported for the pen tool. */ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20)); } if (!prox) wacom->id[0] = 0; input_report_key(input, wacom->tool[0], prox); input_report_abs(input, ABS_MISC, wacom->id[0]); return 1; } static int wacom_ptu_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; if (data[0] != WACOM_REPORT_PENABLED) { dev_dbg(input->dev.parent, "%s: received unknown report #%d\n", __func__, data[0]); return 0; } if (data[1] & 0x04) { input_report_key(input, BTN_TOOL_RUBBER, data[1] & 0x20); input_report_key(input, BTN_TOUCH, data[1] & 0x08); wacom->id[0] = ERASER_DEVICE_ID; } else { input_report_key(input, BTN_TOOL_PEN, data[1] & 0x20); input_report_key(input, BTN_TOUCH, data[1] & 0x01); wacom->id[0] = STYLUS_DEVICE_ID; } input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6])); input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x10); return 1; } static int wacom_dtu_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; int prox = data[1] & 0x20; dev_dbg(input->dev.parent, "%s: received report #%d", __func__, data[0]); if (prox) { /* Going into proximity select tool */ wacom->tool[0] = (data[1] & 0x0c) ? 
BTN_TOOL_RUBBER : BTN_TOOL_PEN; if (wacom->tool[0] == BTN_TOOL_PEN) wacom->id[0] = STYLUS_DEVICE_ID; else wacom->id[0] = ERASER_DEVICE_ID; } input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x10); input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x01) << 8) | data[6]); input_report_key(input, BTN_TOUCH, data[1] & 0x05); if (!prox) /* out-prox */ wacom->id[0] = 0; input_report_key(input, wacom->tool[0], prox); input_report_abs(input, ABS_MISC, wacom->id[0]); return 1; } static int wacom_dtus_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; unsigned short prox, pressure = 0; if (data[0] != WACOM_REPORT_DTUS && data[0] != WACOM_REPORT_DTUSPAD) { dev_dbg(input->dev.parent, "%s: received unknown report #%d", __func__, data[0]); return 0; } else if (data[0] == WACOM_REPORT_DTUSPAD) { input = wacom->pad_input; input_report_key(input, BTN_0, (data[1] & 0x01)); input_report_key(input, BTN_1, (data[1] & 0x02)); input_report_key(input, BTN_2, (data[1] & 0x04)); input_report_key(input, BTN_3, (data[1] & 0x08)); input_report_abs(input, ABS_MISC, data[1] & 0x0f ? PAD_DEVICE_ID : 0); return 1; } else { prox = data[1] & 0x80; if (prox) { switch ((data[1] >> 3) & 3) { case 1: /* Rubber */ wacom->tool[0] = BTN_TOOL_RUBBER; wacom->id[0] = ERASER_DEVICE_ID; break; case 2: /* Pen */ wacom->tool[0] = BTN_TOOL_PEN; wacom->id[0] = STYLUS_DEVICE_ID; break; } } input_report_key(input, BTN_STYLUS, data[1] & 0x20); input_report_key(input, BTN_STYLUS2, data[1] & 0x40); input_report_abs(input, ABS_X, get_unaligned_be16(&data[3])); input_report_abs(input, ABS_Y, get_unaligned_be16(&data[5])); pressure = ((data[1] & 0x03) << 8) | (data[2] & 0xff); input_report_abs(input, ABS_PRESSURE, pressure); input_report_key(input, BTN_TOUCH, pressure > 10); if (!prox) /* out-prox */ wacom->id[0] = 0; input_report_key(input, wacom->tool[0], prox); input_report_abs(input, ABS_MISC, wacom->id[0]); return 1; } } static int wacom_graphire_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; struct input_dev *pad_input = wacom->pad_input; int battery_capacity, ps_connected; int prox; int rw = 0; int retval = 0; if (features->type == GRAPHIRE_BT) { if (data[0] != WACOM_REPORT_PENABLED_BT) { dev_dbg(input->dev.parent, "%s: received unknown report #%d\n", __func__, data[0]); goto exit; } } else if (data[0] != WACOM_REPORT_PENABLED) { dev_dbg(input->dev.parent, "%s: received unknown report #%d\n", __func__, data[0]); goto exit; } prox = data[1] & 0x80; if (prox || wacom->id[0]) { if (prox) { switch ((data[1] >> 5) & 3) { case 0: /* Pen */ wacom->tool[0] = BTN_TOOL_PEN; wacom->id[0] = STYLUS_DEVICE_ID; break; case 1: /* Rubber */ wacom->tool[0] = BTN_TOOL_RUBBER; wacom->id[0] = ERASER_DEVICE_ID; break; case 2: /* Mouse with wheel */ input_report_key(input, BTN_MIDDLE, data[1] & 0x04); fallthrough; case 3: /* Mouse without wheel */ wacom->tool[0] = BTN_TOOL_MOUSE; wacom->id[0] = CURSOR_DEVICE_ID; break; } } input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); if (wacom->tool[0] != BTN_TOOL_MOUSE) { if (features->type == GRAPHIRE_BT) input_report_abs(input, ABS_PRESSURE, data[6] | (((__u16) (data[1] & 0x08)) << 5)); else 
input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8)); input_report_key(input, BTN_TOUCH, data[1] & 0x01); input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x04); } else { input_report_key(input, BTN_LEFT, data[1] & 0x01); input_report_key(input, BTN_RIGHT, data[1] & 0x02); if (features->type == WACOM_G4 || features->type == WACOM_MO) { input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); rw = (data[7] & 0x04) - (data[7] & 0x03); } else if (features->type == GRAPHIRE_BT) { /* Compute distance between mouse and tablet */ rw = 44 - (data[6] >> 2); rw = clamp_val(rw, 0, 31); input_report_abs(input, ABS_DISTANCE, rw); if (((data[1] >> 5) & 3) == 2) { /* Mouse with wheel */ input_report_key(input, BTN_MIDDLE, data[1] & 0x04); rw = (data[6] & 0x01) ? -1 : (data[6] & 0x02) ? 1 : 0; } else { rw = 0; } } else { input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); rw = -(signed char)data[6]; } input_report_rel(input, REL_WHEEL, rw); } if (!prox) wacom->id[0] = 0; input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ input_report_key(input, wacom->tool[0], prox); input_sync(input); /* sync last event */ } /* send pad data */ switch (features->type) { case WACOM_G4: prox = data[7] & 0xf8; if (prox || wacom->id[1]) { wacom->id[1] = PAD_DEVICE_ID; input_report_key(pad_input, BTN_BACK, (data[7] & 0x40)); input_report_key(pad_input, BTN_FORWARD, (data[7] & 0x80)); rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3); input_report_rel(pad_input, REL_WHEEL, rw); if (!prox) wacom->id[1] = 0; input_report_abs(pad_input, ABS_MISC, wacom->id[1]); retval = 1; } break; case WACOM_MO: prox = (data[7] & 0xf8) || data[8]; if (prox || wacom->id[1]) { wacom->id[1] = PAD_DEVICE_ID; input_report_key(pad_input, BTN_BACK, (data[7] & 0x08)); input_report_key(pad_input, BTN_LEFT, (data[7] & 0x20)); input_report_key(pad_input, BTN_FORWARD, (data[7] & 0x10)); input_report_key(pad_input, BTN_RIGHT, (data[7] & 0x40)); input_report_abs(pad_input, ABS_WHEEL, (data[8] & 0x7f)); if (!prox) wacom->id[1] = 0; input_report_abs(pad_input, ABS_MISC, wacom->id[1]); retval = 1; } break; case GRAPHIRE_BT: prox = data[7] & 0x03; if (prox || wacom->id[1]) { wacom->id[1] = PAD_DEVICE_ID; input_report_key(pad_input, BTN_0, (data[7] & 0x02)); input_report_key(pad_input, BTN_1, (data[7] & 0x01)); if (!prox) wacom->id[1] = 0; input_report_abs(pad_input, ABS_MISC, wacom->id[1]); retval = 1; } break; } /* Store current battery capacity and power supply state */ if (features->type == GRAPHIRE_BT) { rw = (data[7] >> 2 & 0x07); battery_capacity = batcap_gr[rw]; ps_connected = rw == 7; wacom_notify_battery(wacom, WACOM_POWER_SUPPLY_STATUS_AUTO, battery_capacity, ps_connected, 1, ps_connected); } exit: return retval; } static void wacom_intuos_schedule_prox_event(struct wacom_wac *wacom_wac) { struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac); struct wacom_features *features = &wacom_wac->features; struct hid_report *r; struct hid_report_enum *re; re = &(wacom->hdev->report_enum[HID_FEATURE_REPORT]); if (features->type == INTUOSHT2) r = re->report_id_hash[WACOM_REPORT_INTUOSHT2_ID]; else r = re->report_id_hash[WACOM_REPORT_INTUOS_ID1]; if (r) { hid_hw_request(wacom->hdev, r, HID_REQ_GET_REPORT); } } static int wacom_intuos_pad(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->pad_input; int i; int buttons = 0, nbuttons = 
features->numbered_buttons; int keys = 0, nkeys = 0; int ring1 = 0, ring2 = 0; int strip1 = 0, strip2 = 0; bool prox = false; bool wrench = false, keyboard = false, mute_touch = false, menu = false, info = false; /* pad packets. Works as a second tool and is always in prox */ if (!(data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD || data[0] == WACOM_REPORT_CINTIQPAD)) return 0; if (features->type >= INTUOS4S && features->type <= INTUOS4L) { buttons = (data[3] << 1) | (data[2] & 0x01); ring1 = data[1]; } else if (features->type == DTK) { buttons = data[6]; } else if (features->type == WACOM_13HD) { buttons = (data[4] << 1) | (data[3] & 0x01); } else if (features->type == WACOM_24HD) { buttons = (data[8] << 8) | data[6]; ring1 = data[1]; ring2 = data[2]; /* * Three "buttons" are available on the 24HD which are * physically implemented as a touchstrip. Each button * is approximately 3 bits wide with a 2 bit spacing. * The raw touchstrip bits are stored at: * ((data[3] & 0x1f) << 8) | data[4]) */ nkeys = 3; keys = ((data[3] & 0x1C) ? 1<<2 : 0) | ((data[4] & 0xE0) ? 1<<1 : 0) | ((data[4] & 0x07) ? 1<<0 : 0); keyboard = !!(data[4] & 0xE0); info = !!(data[3] & 0x1C); if (features->oPid) { mute_touch = !!(data[4] & 0x07); if (mute_touch) wacom->shared->is_touch_on = !wacom->shared->is_touch_on; } else { wrench = !!(data[4] & 0x07); } } else if (features->type == WACOM_27QHD) { nkeys = 3; keys = data[2] & 0x07; wrench = !!(data[2] & 0x01); keyboard = !!(data[2] & 0x02); if (features->oPid) { mute_touch = !!(data[2] & 0x04); if (mute_touch) wacom->shared->is_touch_on = !wacom->shared->is_touch_on; } else { menu = !!(data[2] & 0x04); } input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); } else if (features->type == CINTIQ_HYBRID) { /* * Do not send hardware buttons under Android. They * are already sent to the system through GPIO (and * have different meaning). * * d-pad right -> data[4] & 0x10 * d-pad up -> data[4] & 0x20 * d-pad left -> data[4] & 0x40 * d-pad down -> data[4] & 0x80 * d-pad center -> data[3] & 0x01 */ buttons = (data[4] << 1) | (data[3] & 0x01); } else if (features->type == CINTIQ_COMPANION_2) { /* d-pad right -> data[2] & 0x10 * d-pad up -> data[2] & 0x20 * d-pad left -> data[2] & 0x40 * d-pad down -> data[2] & 0x80 * d-pad center -> data[1] & 0x01 */ buttons = ((data[2] >> 4) << 7) | ((data[1] & 0x04) << 4) | ((data[2] & 0x0F) << 2) | (data[1] & 0x03); } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { /* * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in * addition to the mechanical switch. Switch data is * stored in data[4], capacitive data in data[5]. 
* * Touch ring mode switch (data[3]) has no capacitive sensor */ buttons = (data[4] << 1) | (data[3] & 0x01); ring1 = data[2]; } else { if (features->type == WACOM_21UX2 || features->type == WACOM_22HD) { buttons = (data[8] << 10) | ((data[7] & 0x01) << 9) | (data[6] << 1) | (data[5] & 0x01); if (features->type == WACOM_22HD) { nkeys = 3; keys = data[9] & 0x07; info = !!(data[9] & 0x01); wrench = !!(data[9] & 0x02); } } else { buttons = ((data[6] & 0x10) << 5) | ((data[5] & 0x10) << 4) | ((data[6] & 0x0F) << 4) | (data[5] & 0x0F); } strip1 = ((data[1] & 0x1f) << 8) | data[2]; strip2 = ((data[3] & 0x1f) << 8) | data[4]; } prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) | (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2; wacom_report_numbered_buttons(input, nbuttons, buttons); for (i = 0; i < nkeys; i++) input_report_key(input, KEY_PROG1 + i, keys & (1 << i)); input_report_key(input, KEY_BUTTONCONFIG, wrench); input_report_key(input, KEY_ONSCREEN_KEYBOARD, keyboard); input_report_key(input, KEY_CONTROLPANEL, menu); input_report_key(input, KEY_INFO, info); if (wacom->shared && wacom->shared->touch_input) { input_report_switch(wacom->shared->touch_input, SW_MUTE_DEVICE, !wacom->shared->is_touch_on); input_sync(wacom->shared->touch_input); } input_report_abs(input, ABS_RX, strip1); input_report_abs(input, ABS_RY, strip2); input_report_abs(input, ABS_WHEEL, (ring1 & 0x80) ? (ring1 & 0x7f) : 0); input_report_abs(input, ABS_THROTTLE, (ring2 & 0x80) ? (ring2 & 0x7f) : 0); input_report_key(input, wacom->tool[1], prox ? 1 : 0); input_report_abs(input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff); return 1; } static int wacom_intuos_id_mangle(int tool_id) { return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF); } static bool wacom_is_art_pen(int tool_id) { bool is_art_pen = false; switch (tool_id) { case 0x885: /* Intuos3 Marker Pen */ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */ case 0x204: /* Art Pen 2 */ is_art_pen = true; break; } return is_art_pen; } static int wacom_intuos_get_tool_type(int tool_id) { switch (tool_id) { case 0x812: /* Inking pen */ case 0x801: /* Intuos3 Inking pen */ case 0x12802: /* Intuos4/5 Inking Pen */ case 0x012: return BTN_TOOL_PENCIL; case 0x832: /* Stroke pen */ case 0x032: return BTN_TOOL_BRUSH; case 0x007: /* Mouse 4D and 2D */ case 0x09c: case 0x094: case 0x017: /* Intuos3 2D Mouse */ case 0x806: /* Intuos4 Mouse */ return BTN_TOOL_MOUSE; case 0x096: /* Lens cursor */ case 0x097: /* Intuos3 Lens cursor */ case 0x006: /* Intuos4 Lens cursor */ return BTN_TOOL_LENS; case 0xd12: case 0x912: case 0x112: case 0x913: /* Intuos3 Airbrush */ case 0x902: /* Intuos4/5 13HD/24HD Airbrush */ case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */ return BTN_TOOL_AIRBRUSH; default: if (tool_id & 0x0008) return BTN_TOOL_RUBBER; return BTN_TOOL_PEN; } } static void wacom_exit_report(struct wacom_wac *wacom) { struct input_dev *input = wacom->pen_input; struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; int idx = (features->type == INTUOS) ? 
(data[1] & 0x01) : 0; /* * Reset all states otherwise we lose the initial states * when in-prox next time */ input_report_abs(input, ABS_X, 0); input_report_abs(input, ABS_Y, 0); input_report_abs(input, ABS_DISTANCE, 0); input_report_abs(input, ABS_TILT_X, 0); input_report_abs(input, ABS_TILT_Y, 0); if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { input_report_key(input, BTN_LEFT, 0); input_report_key(input, BTN_MIDDLE, 0); input_report_key(input, BTN_RIGHT, 0); input_report_key(input, BTN_SIDE, 0); input_report_key(input, BTN_EXTRA, 0); input_report_abs(input, ABS_THROTTLE, 0); input_report_abs(input, ABS_RZ, 0); } else { input_report_abs(input, ABS_PRESSURE, 0); input_report_key(input, BTN_STYLUS, 0); input_report_key(input, BTN_STYLUS2, 0); input_report_key(input, BTN_TOUCH, 0); input_report_abs(input, ABS_WHEEL, 0); if (features->type >= INTUOS3S) input_report_abs(input, ABS_Z, 0); } input_report_key(input, wacom->tool[idx], 0); input_report_abs(input, ABS_MISC, 0); /* reset tool id */ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); wacom->id[idx] = 0; } static int wacom_intuos_inout(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0; if (!(((data[1] & 0xfc) == 0xc0) || /* in prox */ ((data[1] & 0xfe) == 0x20) || /* in range */ ((data[1] & 0xfe) == 0x80))) /* out prox */ return 0; /* Enter report */ if ((data[1] & 0xfc) == 0xc0) { /* serial number of the tool */ wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) + (data[4] << 20) + (data[5] << 12) + (data[6] << 4) + (data[7] >> 4); wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) | ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8); wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]); wacom->shared->stylus_in_proximity = true; return 1; } /* in Range */ if ((data[1] & 0xfe) == 0x20) { if (features->type != INTUOSHT2) wacom->shared->stylus_in_proximity = true; /* in Range while exiting */ if (wacom->reporting_data) { input_report_key(input, BTN_TOUCH, 0); input_report_abs(input, ABS_PRESSURE, 0); input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max); return 2; } return 1; } /* Exit report */ if ((data[1] & 0xfe) == 0x80) { wacom->shared->stylus_in_proximity = false; wacom->reporting_data = false; /* don't report exit if we don't know the ID */ if (!wacom->id[idx]) return 1; wacom_exit_report(wacom); return 2; } return 0; } static inline bool touch_is_muted(struct wacom_wac *wacom_wac) { return wacom_wac->probe_complete && wacom_wac->shared->has_mute_touch_switch && !wacom_wac->shared->is_touch_on; } static inline bool report_touch_events(struct wacom_wac *wacom) { return (touch_arbitration ? !wacom->shared->stylus_in_proximity : 1); } static inline bool delay_pen_events(struct wacom_wac *wacom) { return (wacom->shared->touch_down && touch_arbitration); } static int wacom_intuos_general(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; int idx = (features->type == INTUOS) ? 
(data[1] & 0x01) : 0; unsigned char type = (data[1] >> 1) & 0x0F; unsigned int x, y, distance, t; if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_CINTIQ && data[0] != WACOM_REPORT_INTUOS_PEN) return 0; if (delay_pen_events(wacom)) return 1; /* don't report events if we don't know the tool ID */ if (!wacom->id[idx]) { /* but reschedule a read of the current tool */ wacom_intuos_schedule_prox_event(wacom); return 1; } /* * don't report events for invalid data */ /* older I4 styli don't work with new Cintiqs */ if ((!((wacom->id[idx] >> 16) & 0x01) && (features->type == WACOM_21UX2)) || /* Only large Intuos support Lense Cursor */ (wacom->tool[idx] == BTN_TOOL_LENS && (features->type == INTUOS3 || features->type == INTUOS3S || features->type == INTUOS4 || features->type == INTUOS4S || features->type == INTUOS5 || features->type == INTUOS5S || features->type == INTUOSPM || features->type == INTUOSPS)) || /* Cintiq doesn't send data when RDY bit isn't set */ (features->type == CINTIQ && !(data[1] & 0x40))) return 1; x = (be16_to_cpup((__be16 *)&data[2]) << 1) | ((data[9] >> 1) & 1); y = (be16_to_cpup((__be16 *)&data[4]) << 1) | (data[9] & 1); distance = data[9] >> 2; if (features->type < INTUOS3S) { x >>= 1; y >>= 1; distance >>= 1; } if (features->type == INTUOSHT2) distance = features->distance_max - distance; input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_DISTANCE, distance); switch (type) { case 0x00: case 0x01: case 0x02: case 0x03: /* general pen packet */ t = (data[6] << 3) | ((data[7] & 0xC0) >> 5) | (data[1] & 1); if (features->pressure_max < 2047) t >>= 1; input_report_abs(input, ABS_PRESSURE, t); if (features->type != INTUOSHT2) { input_report_abs(input, ABS_TILT_X, (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64); input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64); } input_report_key(input, BTN_STYLUS, data[1] & 2); input_report_key(input, BTN_STYLUS2, data[1] & 4); input_report_key(input, BTN_TOUCH, t > 10); break; case 0x0a: /* airbrush second packet */ input_report_abs(input, ABS_WHEEL, (data[6] << 2) | ((data[7] >> 6) & 3)); input_report_abs(input, ABS_TILT_X, (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64); input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64); break; case 0x05: /* Rotation packet */ if (features->type >= INTUOS3S) { /* I3 marker pen rotation */ t = (data[6] << 3) | ((data[7] >> 5) & 7); t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) : ((t-1) / 2 + 450)) : (450 - t / 2) ; input_report_abs(input, ABS_Z, t); } else { /* 4D mouse 2nd packet */ t = (data[6] << 3) | ((data[7] >> 5) & 7); input_report_abs(input, ABS_RZ, (data[7] & 0x20) ? ((t - 1) / 2) : -t / 2); } break; case 0x04: /* 4D mouse 1st packet */ input_report_key(input, BTN_LEFT, data[8] & 0x01); input_report_key(input, BTN_MIDDLE, data[8] & 0x02); input_report_key(input, BTN_RIGHT, data[8] & 0x04); input_report_key(input, BTN_SIDE, data[8] & 0x20); input_report_key(input, BTN_EXTRA, data[8] & 0x10); t = (data[6] << 2) | ((data[7] >> 6) & 3); input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? 
-t : t); break; case 0x06: /* I4 mouse */ input_report_key(input, BTN_LEFT, data[6] & 0x01); input_report_key(input, BTN_MIDDLE, data[6] & 0x02); input_report_key(input, BTN_RIGHT, data[6] & 0x04); input_report_rel(input, REL_WHEEL, ((data[7] & 0x80) >> 7) - ((data[7] & 0x40) >> 6)); input_report_key(input, BTN_SIDE, data[6] & 0x08); input_report_key(input, BTN_EXTRA, data[6] & 0x10); input_report_abs(input, ABS_TILT_X, (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64); input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64); break; case 0x08: if (wacom->tool[idx] == BTN_TOOL_MOUSE) { /* 2D mouse packet */ input_report_key(input, BTN_LEFT, data[8] & 0x04); input_report_key(input, BTN_MIDDLE, data[8] & 0x08); input_report_key(input, BTN_RIGHT, data[8] & 0x10); input_report_rel(input, REL_WHEEL, (data[8] & 0x01) - ((data[8] & 0x02) >> 1)); /* I3 2D mouse side buttons */ if (features->type >= INTUOS3S && features->type <= INTUOS3L) { input_report_key(input, BTN_SIDE, data[8] & 0x40); input_report_key(input, BTN_EXTRA, data[8] & 0x20); } } else if (wacom->tool[idx] == BTN_TOOL_LENS) { /* Lens cursor packets */ input_report_key(input, BTN_LEFT, data[8] & 0x01); input_report_key(input, BTN_MIDDLE, data[8] & 0x02); input_report_key(input, BTN_RIGHT, data[8] & 0x04); input_report_key(input, BTN_SIDE, data[8] & 0x10); input_report_key(input, BTN_EXTRA, data[8] & 0x08); } break; case 0x07: case 0x09: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: /* unhandled */ break; } input_report_abs(input, ABS_MISC, wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */ input_report_key(input, wacom->tool[idx], 1); input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); wacom->reporting_data = true; return 2; } static int wacom_intuos_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; int result; if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_INTUOS_ID1 && data[0] != WACOM_REPORT_INTUOS_ID2 && data[0] != WACOM_REPORT_INTUOSPAD && data[0] != WACOM_REPORT_INTUOS_PEN && data[0] != WACOM_REPORT_CINTIQ && data[0] != WACOM_REPORT_CINTIQPAD && data[0] != WACOM_REPORT_INTUOS5PAD) { dev_dbg(input->dev.parent, "%s: received unknown report #%d\n", __func__, data[0]); return 0; } /* process pad events */ result = wacom_intuos_pad(wacom); if (result) return result; /* process in/out prox events */ result = wacom_intuos_inout(wacom); if (result) return result - 1; /* process general packets */ result = wacom_intuos_general(wacom); if (result) return result - 1; return 0; } static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len) { unsigned char *data = wacom_wac->data; struct input_dev *input; struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac); struct wacom_remote *remote = wacom->remote; int bat_charging, bat_percent, touch_ring_mode; __u32 serial; int i, index = -1; unsigned long flags; if (data[0] != WACOM_REPORT_REMOTE) { hid_dbg(wacom->hdev, "%s: received unknown report #%d", __func__, data[0]); return 0; } serial = data[3] + (data[4] << 8) + (data[5] << 16); wacom_wac->id[0] = PAD_DEVICE_ID; spin_lock_irqsave(&remote->remote_lock, flags); for (i = 0; i < WACOM_MAX_REMOTES; i++) { if (remote->remotes[i].serial == serial) { index = i; break; } } if (index < 0 || !remote->remotes[index].registered) goto out; remote->remotes[i].active_time = ktime_get(); input = remote->remotes[index].input; input_report_key(input, BTN_0, (data[9] & 0x01)); input_report_key(input, BTN_1, (data[9] & 0x02)); 
input_report_key(input, BTN_2, (data[9] & 0x04)); input_report_key(input, BTN_3, (data[9] & 0x08)); input_report_key(input, BTN_4, (data[9] & 0x10)); input_report_key(input, BTN_5, (data[9] & 0x20)); input_report_key(input, BTN_6, (data[9] & 0x40)); input_report_key(input, BTN_7, (data[9] & 0x80)); input_report_key(input, BTN_8, (data[10] & 0x01)); input_report_key(input, BTN_9, (data[10] & 0x02)); input_report_key(input, BTN_A, (data[10] & 0x04)); input_report_key(input, BTN_B, (data[10] & 0x08)); input_report_key(input, BTN_C, (data[10] & 0x10)); input_report_key(input, BTN_X, (data[10] & 0x20)); input_report_key(input, BTN_Y, (data[10] & 0x40)); input_report_key(input, BTN_Z, (data[10] & 0x80)); input_report_key(input, BTN_BASE, (data[11] & 0x01)); input_report_key(input, BTN_BASE2, (data[11] & 0x02)); if (data[12] & 0x80) input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1); else input_report_abs(input, ABS_WHEEL, 0); bat_percent = data[7] & 0x7f; bat_charging = !!(data[7] & 0x80); if (data[9] | data[10] | (data[11] & 0x03) | data[12]) input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); else input_report_abs(input, ABS_MISC, 0); input_event(input, EV_MSC, MSC_SERIAL, serial); input_sync(input); /*Which mode select (LED light) is currently on?*/ touch_ring_mode = (data[11] & 0xC0) >> 6; for (i = 0; i < WACOM_MAX_REMOTES; i++) { if (remote->remotes[i].serial == serial) wacom->led.groups[i].select = touch_ring_mode; } __wacom_notify_battery(&remote->remotes[index].battery, WACOM_POWER_SUPPLY_STATUS_AUTO, bat_percent, bat_charging, 1, bat_charging); out: spin_unlock_irqrestore(&remote->remote_lock, flags); return 0; } static void wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len) { struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac); unsigned char *data = wacom_wac->data; struct wacom_remote *remote = wacom->remote; struct wacom_remote_work_data remote_data; unsigned long flags; int i, ret; if (data[0] != WACOM_REPORT_DEVICE_LIST) return; memset(&remote_data, 0, sizeof(struct wacom_remote_work_data)); for (i = 0; i < WACOM_MAX_REMOTES; i++) { int j = i * 6; int serial = (data[j+6] << 16) + (data[j+5] << 8) + data[j+4]; remote_data.remote[i].serial = serial; } spin_lock_irqsave(&remote->remote_lock, flags); ret = kfifo_in(&remote->remote_fifo, &remote_data, sizeof(remote_data)); if (ret != sizeof(remote_data)) { spin_unlock_irqrestore(&remote->remote_lock, flags); hid_err(wacom->hdev, "Can't queue Remote status event.\n"); return; } spin_unlock_irqrestore(&remote->remote_lock, flags); wacom_schedule_work(wacom_wac, WACOM_WORKER_REMOTE); } static int int_dist(int x1, int y1, int x2, int y2) { int x = x2 - x1; int y = y2 - y1; return int_sqrt(x*x + y*y); } static void wacom_intuos_bt_process_data(struct wacom_wac *wacom, unsigned char *data) { memcpy(wacom->data, data, 10); wacom_intuos_irq(wacom); input_sync(wacom->pen_input); if (wacom->pad_input) input_sync(wacom->pad_input); } static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len) { u8 *data = kmemdup(wacom->data, len, GFP_KERNEL); int i = 1; unsigned power_raw, battery_capacity, bat_charging, ps_connected; switch (data[0]) { case 0x04: wacom_intuos_bt_process_data(wacom, data + i); i += 10; fallthrough; case 0x03: wacom_intuos_bt_process_data(wacom, data + i); i += 10; wacom_intuos_bt_process_data(wacom, data + i); i += 10; power_raw = data[i]; bat_charging = (power_raw & 0x08) ? 1 : 0; ps_connected = (power_raw & 0x10) ? 
1 : 0; battery_capacity = batcap_i4[power_raw & 0x07]; wacom_notify_battery(wacom, WACOM_POWER_SUPPLY_STATUS_AUTO, battery_capacity, bat_charging, battery_capacity || bat_charging, ps_connected); break; default: dev_dbg(wacom->pen_input->dev.parent, "Unknown report: %d,%d size:%zu\n", data[0], data[1], len); break; } kfree(data); return 0; } static int wacom_wac_finger_count_touches(struct wacom_wac *wacom) { struct input_dev *input = wacom->touch_input; unsigned touch_max = wacom->features.touch_max; int count = 0; int i; if (!touch_max) return 0; if (touch_max == 1) return test_bit(BTN_TOUCH, input->key) && report_touch_events(wacom); for (i = 0; i < input->mt->num_slots; i++) { struct input_mt_slot *ps = &input->mt->slots[i]; int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID); if (id >= 0) count++; } return count; } static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) { int pen_frame_len, pen_frames; struct input_dev *pen_input = wacom->pen_input; unsigned char *data = wacom->data; int number_of_valid_frames = 0; ktime_t time_interval = 15000000; ktime_t time_packet_received = ktime_get(); int i; if (wacom->features.type == INTUOSP2_BT || wacom->features.type == INTUOSP2S_BT) { wacom->serial[0] = get_unaligned_le64(&data[99]); wacom->id[0] = get_unaligned_le16(&data[107]); pen_frame_len = 14; pen_frames = 7; } else { wacom->serial[0] = get_unaligned_le64(&data[33]); wacom->id[0] = get_unaligned_le16(&data[41]); pen_frame_len = 8; pen_frames = 4; } if (wacom->serial[0] >> 52 == 1) { /* Add back in missing bits of ID for non-USI pens */ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF; } /* number of valid frames */ for (i = 0; i < pen_frames; i++) { unsigned char *frame = &data[i*pen_frame_len + 1]; bool valid = frame[0] & 0x80; if (valid) number_of_valid_frames++; } if (number_of_valid_frames) { if (wacom->hid_data.time_delayed) time_interval = ktime_get() - wacom->hid_data.time_delayed; time_interval = div_u64(time_interval, number_of_valid_frames); wacom->hid_data.time_delayed = time_packet_received; } for (i = 0; i < number_of_valid_frames; i++) { unsigned char *frame = &data[i*pen_frame_len + 1]; bool valid = frame[0] & 0x80; bool prox = frame[0] & 0x40; bool range = frame[0] & 0x20; bool invert = frame[0] & 0x10; int frames_number_reversed = number_of_valid_frames - i - 1; ktime_t event_timestamp = time_packet_received - frames_number_reversed * time_interval; if (!valid) continue; if (!prox) { wacom->shared->stylus_in_proximity = false; wacom_exit_report(wacom); input_sync(pen_input); wacom->tool[0] = 0; wacom->id[0] = 0; wacom->serial[0] = 0; wacom->hid_data.time_delayed = 0; return; } if (range) { if (!wacom->tool[0]) { /* first in range */ /* Going into range select tool */ if (invert) wacom->tool[0] = BTN_TOOL_RUBBER; else if (wacom->id[0]) wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]); else wacom->tool[0] = BTN_TOOL_PEN; } input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); if (wacom->features.type == INTUOSP2_BT || wacom->features.type == INTUOSP2S_BT) { /* Fix rotation alignment: userspace expects zero at left */ int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]); rotation += 1800/4; if (rotation > 899) rotation -= 1800; input_report_abs(pen_input, ABS_TILT_X, (signed char)frame[7]); input_report_abs(pen_input, ABS_TILT_Y, (signed char)frame[8]); input_report_abs(pen_input, ABS_Z, rotation); input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); 
} } if (wacom->tool[0]) { input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); if (wacom->features.type == INTUOSP2_BT || wacom->features.type == INTUOSP2S_BT) { input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max); } else { input_report_abs(pen_input, ABS_DISTANCE, range ? frame[7] : wacom->features.distance_max); } input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09); input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); input_report_key(pen_input, wacom->tool[0], prox); input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); input_report_abs(pen_input, ABS_MISC, wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ } wacom->shared->stylus_in_proximity = prox; /* add timestamp to unpack the frames */ input_set_timestamp(pen_input, event_timestamp); input_sync(pen_input); } } static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom) { const int finger_touch_len = 8; const int finger_frames = 4; const int finger_frame_len = 43; struct input_dev *touch_input = wacom->touch_input; unsigned char *data = wacom->data; int num_contacts_left = 5; int i, j; for (i = 0; i < finger_frames; i++) { unsigned char *frame = &data[i*finger_frame_len + 109]; int current_num_contacts = frame[0] & 0x7F; int contacts_to_send; if (!(frame[0] & 0x80)) continue; /* * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. */ if (current_num_contacts) wacom->num_contacts_left = current_num_contacts; contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); for (j = 0; j < contacts_to_send; j++) { unsigned char *touch = &frame[j*finger_touch_len + 1]; int slot = input_mt_get_slot_by_key(touch_input, touch[0]); int x = get_unaligned_le16(&touch[2]); int y = get_unaligned_le16(&touch[4]); int w = touch[6] * input_abs_get_res(touch_input, ABS_MT_POSITION_X); int h = touch[7] * input_abs_get_res(touch_input, ABS_MT_POSITION_Y); if (slot < 0) continue; input_mt_slot(touch_input, slot); input_mt_report_slot_state(touch_input, MT_TOOL_FINGER, touch[1] & 0x01); input_report_abs(touch_input, ABS_MT_POSITION_X, x); input_report_abs(touch_input, ABS_MT_POSITION_Y, y); input_report_abs(touch_input, ABS_MT_TOUCH_MAJOR, max(w, h)); input_report_abs(touch_input, ABS_MT_TOUCH_MINOR, min(w, h)); input_report_abs(touch_input, ABS_MT_ORIENTATION, w > h); } input_mt_sync_frame(touch_input); wacom->num_contacts_left -= contacts_to_send; if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); input_sync(touch_input); } } if (wacom->num_contacts_left == 0) { // Be careful that we don't accidentally call input_sync with // only a partial set of fingers of processed input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); input_sync(touch_input); } } static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) { struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int nbuttons = wacom->features.numbered_buttons; int expresskeys = data[282]; int center = (data[281] & 0x40) >> 6; int ring = data[285] & 0x7F; bool ringstatus = data[285] & 0x80; bool prox = expresskeys || center || ringstatus; /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ ring = 71 - ring; ring += 3*72/16; if (ring > 71) ring -= 72; wacom_report_numbered_buttons(pad_input, nbuttons, expresskeys | (center << (nbuttons 
- 1))); input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0); input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); input_event(pad_input, EV_MSC, MSC_SERIAL, 0xffffffff); input_sync(pad_input); } static void wacom_intuos_pro2_bt_battery(struct wacom_wac *wacom) { unsigned char *data = wacom->data; bool chg = data[284] & 0x80; int battery_status = data[284] & 0x7F; wacom_notify_battery(wacom, WACOM_POWER_SUPPLY_STATUS_AUTO, battery_status, chg, 1, chg); } static void wacom_intuos_gen3_bt_pad(struct wacom_wac *wacom) { struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int buttons = data[44]; wacom_report_numbered_buttons(pad_input, 4, buttons); input_report_key(pad_input, wacom->tool[1], buttons ? 1 : 0); input_report_abs(pad_input, ABS_MISC, buttons ? PAD_DEVICE_ID : 0); input_event(pad_input, EV_MSC, MSC_SERIAL, 0xffffffff); input_sync(pad_input); } static void wacom_intuos_gen3_bt_battery(struct wacom_wac *wacom) { unsigned char *data = wacom->data; bool chg = data[45] & 0x80; int battery_status = data[45] & 0x7F; wacom_notify_battery(wacom, WACOM_POWER_SUPPLY_STATUS_AUTO, battery_status, chg, 1, chg); } static int wacom_intuos_pro2_bt_irq(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; if (data[0] != 0x80 && data[0] != 0x81) { dev_dbg(wacom->pen_input->dev.parent, "%s: received unknown report #%d\n", __func__, data[0]); return 0; } wacom_intuos_pro2_bt_pen(wacom); if (wacom->features.type == INTUOSP2_BT || wacom->features.type == INTUOSP2S_BT) { wacom_intuos_pro2_bt_touch(wacom); wacom_intuos_pro2_bt_pad(wacom); wacom_intuos_pro2_bt_battery(wacom); } else { wacom_intuos_gen3_bt_pad(wacom); wacom_intuos_gen3_bt_battery(wacom); } return 0; } static int wacom_24hdt_irq(struct wacom_wac *wacom) { struct input_dev *input = wacom->touch_input; unsigned char *data = wacom->data; int i; int current_num_contacts = data[61]; int contacts_to_send = 0; int num_contacts_left = 4; /* maximum contacts per packet */ int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; int y_offset = 2; if (touch_is_muted(wacom) && !wacom->shared->touch_down) return 0; if (wacom->features.type == WACOM_27QHDT) { current_num_contacts = data[63]; num_contacts_left = 10; byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; y_offset = 0; } /* * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. 
*/ if (current_num_contacts) wacom->num_contacts_left = current_num_contacts; contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); for (i = 0; i < contacts_to_send; i++) { int offset = (byte_per_packet * i) + 1; bool touch = (data[offset] & 0x1) && report_touch_events(wacom); int slot = input_mt_get_slot_by_key(input, data[offset + 1]); if (slot < 0) continue; input_mt_slot(input, slot); input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { int t_x = get_unaligned_le16(&data[offset + 2]); int t_y = get_unaligned_le16(&data[offset + 4 + y_offset]); input_report_abs(input, ABS_MT_POSITION_X, t_x); input_report_abs(input, ABS_MT_POSITION_Y, t_y); if (wacom->features.type != WACOM_27QHDT) { int c_x = get_unaligned_le16(&data[offset + 4]); int c_y = get_unaligned_le16(&data[offset + 8]); int w = get_unaligned_le16(&data[offset + 10]); int h = get_unaligned_le16(&data[offset + 12]); input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h)); input_report_abs(input, ABS_MT_WIDTH_MAJOR, min(w, h) + int_dist(t_x, t_y, c_x, c_y)); input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); input_report_abs(input, ABS_MT_ORIENTATION, w > h); } } } input_mt_sync_frame(input); wacom->num_contacts_left -= contacts_to_send; if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); } return 1; } static int wacom_mt_touch(struct wacom_wac *wacom) { struct input_dev *input = wacom->touch_input; unsigned char *data = wacom->data; int i; int current_num_contacts = data[2]; int contacts_to_send = 0; int x_offset = 0; /* MTTPC does not support Height and Width */ if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) x_offset = -4; /* * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. 
*/ if (current_num_contacts) wacom->num_contacts_left = current_num_contacts; /* There are at most 5 contacts per packet */ contacts_to_send = min(5, wacom->num_contacts_left); for (i = 0; i < contacts_to_send; i++) { int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3; bool touch = (data[offset] & 0x1) && report_touch_events(wacom); int id = get_unaligned_le16(&data[offset + 1]); int slot = input_mt_get_slot_by_key(input, id); if (slot < 0) continue; input_mt_slot(input, slot); input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { int x = get_unaligned_le16(&data[offset + x_offset + 7]); int y = get_unaligned_le16(&data[offset + x_offset + 9]); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); } } input_mt_sync_frame(input); wacom->num_contacts_left -= contacts_to_send; if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); } return 1; } static int wacom_tpc_mt_touch(struct wacom_wac *wacom) { struct input_dev *input = wacom->touch_input; unsigned char *data = wacom->data; int i; for (i = 0; i < 2; i++) { int p = data[1] & (1 << i); bool touch = p && report_touch_events(wacom); input_mt_slot(input, i); input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { int x = le16_to_cpup((__le16 *)&data[i * 2 + 2]) & 0x7fff; int y = le16_to_cpup((__le16 *)&data[i * 2 + 6]) & 0x7fff; input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); } } input_mt_sync_frame(input); /* keep touch state for pen event */ wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); return 1; } static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; struct input_dev *input = wacom->touch_input; bool prox = report_touch_events(wacom); int x = 0, y = 0; if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) return 0; if (len == WACOM_PKGLEN_TPC1FG) { prox = prox && (data[0] & 0x01); x = get_unaligned_le16(&data[1]); y = get_unaligned_le16(&data[3]); } else if (len == WACOM_PKGLEN_TPC1FG_B) { prox = prox && (data[2] & 0x01); x = get_unaligned_le16(&data[3]); y = get_unaligned_le16(&data[5]); } else { prox = prox && (data[1] & 0x01); x = le16_to_cpup((__le16 *)&data[2]); y = le16_to_cpup((__le16 *)&data[4]); } if (prox) { input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); } input_report_key(input, BTN_TOUCH, prox); /* keep touch state for pen events */ wacom->shared->touch_down = prox; return 1; } static int wacom_tpc_pen(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; bool prox = data[1] & 0x20; if (!wacom->shared->stylus_in_proximity) /* first in prox */ /* Going into proximity select tool */ wacom->tool[0] = (data[1] & 0x0c) ? 
BTN_TOOL_RUBBER : BTN_TOOL_PEN; /* keep pen state for touch events */ wacom->shared->stylus_in_proximity = prox; /* send pen events only when touch is up or forced out * or touch arbitration is off */ if (!delay_pen_events(wacom)) { input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x10); input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x07) << 8) | data[6]); input_report_key(input, BTN_TOUCH, data[1] & 0x05); input_report_key(input, wacom->tool[0], prox); return 1; } return 0; } static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; if (wacom->pen_input) { dev_dbg(wacom->pen_input->dev.parent, "%s: received report #%d\n", __func__, data[0]); if (len == WACOM_PKGLEN_PENABLED || data[0] == WACOM_REPORT_PENABLED) return wacom_tpc_pen(wacom); } else if (wacom->touch_input) { dev_dbg(wacom->touch_input->dev.parent, "%s: received report #%d\n", __func__, data[0]); switch (len) { case WACOM_PKGLEN_TPC1FG: return wacom_tpc_single_touch(wacom, len); case WACOM_PKGLEN_TPC2FG: return wacom_tpc_mt_touch(wacom); default: switch (data[0]) { case WACOM_REPORT_TPC1FG: case WACOM_REPORT_TPCHID: case WACOM_REPORT_TPCST: case WACOM_REPORT_TPC1FGE: return wacom_tpc_single_touch(wacom, len); case WACOM_REPORT_TPCMT: case WACOM_REPORT_TPCMT2: return wacom_mt_touch(wacom); } } } return 0; } static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage, int value, int num, int denom) { struct input_absinfo *abs = &input->absinfo[usage->code]; int range = (abs->maximum - abs->minimum + 1); value += num*range/denom; if (value > abs->maximum) value -= range; else if (value < abs->minimum) value += range; return value; } int wacom_equivalent_usage(int usage) { if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { int subpage = (usage & 0xFF00) << 8; int subusage = (usage & 0xFF); if (subpage == WACOM_HID_SP_PAD || subpage == WACOM_HID_SP_BUTTON || subpage == WACOM_HID_SP_DIGITIZER || subpage == WACOM_HID_SP_DIGITIZERINFO || usage == WACOM_HID_WD_SENSE || usage == WACOM_HID_WD_SERIALHI || usage == WACOM_HID_WD_TOOLTYPE || usage == WACOM_HID_WD_DISTANCE || usage == WACOM_HID_WD_TOUCHSTRIP || usage == WACOM_HID_WD_TOUCHSTRIP2 || usage == WACOM_HID_WD_TOUCHRING || usage == WACOM_HID_WD_TOUCHRINGSTATUS || usage == WACOM_HID_WD_REPORT_VALID || usage == WACOM_HID_WD_BARRELSWITCH3 || usage == WACOM_HID_WD_SEQUENCENUMBER) { return usage; } if (subpage == HID_UP_UNDEFINED) subpage = HID_UP_DIGITIZER; return subpage | subusage; } if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMTOUCH) { int subpage = (usage & 0xFF00) << 8; int subusage = (usage & 0xFF); if (usage == WACOM_HID_WT_REPORT_VALID) return usage; if (subpage == HID_UP_UNDEFINED) subpage = WACOM_HID_SP_DIGITIZER; return subpage | subusage; } return usage; } static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage, struct hid_field *field, __u8 type, __u16 code, int fuzz) { struct wacom *wacom = input_get_drvdata(input); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; int fmin = field->logical_minimum; int fmax = field->logical_maximum; unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); int resolution_code = code; int resolution; if (equivalent_usage == HID_DG_TWIST) { resolution_code = ABS_RZ; } resolution = 
hidinput_calc_abs_res(field, resolution_code); if (equivalent_usage == HID_GD_X) { fmin += features->offset_left; fmax -= features->offset_right; } if (equivalent_usage == HID_GD_Y) { fmin += features->offset_top; fmax -= features->offset_bottom; } usage->type = type; usage->code = code; switch (type) { case EV_ABS: input_set_abs_params(input, code, fmin, fmax, fuzz, 0); /* older tablet may miss physical usage */ if ((code == ABS_X || code == ABS_Y) && !resolution) { resolution = WACOM_INTUOS_RES; hid_warn(input, "Using default resolution for axis type 0x%x code 0x%x\n", type, code); } input_abs_set_res(input, code, resolution); break; case EV_REL: case EV_KEY: case EV_MSC: case EV_SW: input_set_capability(input, type, code); break; } } static void wacom_wac_battery_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { return; } static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); switch (equivalent_usage) { case HID_DG_BATTERYSTRENGTH: if (value == 0) { wacom_wac->hid_data.bat_status = POWER_SUPPLY_STATUS_UNKNOWN; } else { value = value * 100 / (field->logical_maximum - field->logical_minimum); wacom_wac->hid_data.battery_capacity = value; wacom_wac->hid_data.bat_connected = 1; wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO; } wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY; break; case WACOM_HID_WD_BATTERY_LEVEL: value = value * 100 / (field->logical_maximum - field->logical_minimum); wacom_wac->hid_data.battery_capacity = value; wacom_wac->hid_data.bat_connected = 1; wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO; wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY; break; case WACOM_HID_WD_BATTERY_CHARGING: wacom_wac->hid_data.bat_charging = value; wacom_wac->hid_data.ps_connected = value; wacom_wac->hid_data.bat_connected = 1; wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO; wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY; break; } } static void wacom_wac_battery_pre_report(struct hid_device *hdev, struct hid_report *report) { return; } static void wacom_wac_battery_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; int status = wacom_wac->hid_data.bat_status; int capacity = wacom_wac->hid_data.battery_capacity; bool charging = wacom_wac->hid_data.bat_charging; bool connected = wacom_wac->hid_data.bat_connected; bool powered = wacom_wac->hid_data.ps_connected; wacom_notify_battery(wacom_wac, status, capacity, charging, connected, powered); } static void wacom_wac_pad_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; struct input_dev *input = wacom_wac->pad_input; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); switch (equivalent_usage) { case WACOM_HID_WD_ACCELEROMETER_X: __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit); wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_ACCELEROMETER_Y: __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit); wacom_map_usage(input, usage, field, EV_ABS, 
ABS_Y, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_ACCELEROMETER_Z: __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit); wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_BUTTONCENTER: case WACOM_HID_WD_BUTTONHOME: case WACOM_HID_WD_BUTTONUP: case WACOM_HID_WD_BUTTONDOWN: case WACOM_HID_WD_BUTTONLEFT: case WACOM_HID_WD_BUTTONRIGHT: wacom_map_usage(input, usage, field, EV_KEY, wacom_numbered_button_to_key(features->numbered_buttons), 0); features->numbered_buttons++; features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_MUTE_DEVICE: /* softkey touch switch */ wacom_wac->is_soft_touch_switch = true; fallthrough; case WACOM_HID_WD_TOUCHONOFF: /* * These two usages, which are used to mute touch events, come * from the pad packet, but are reported on the touch * interface. Because the touch interface may not have * been created yet, we cannot call wacom_map_usage(). In * order to process the usages when we receive them, we set * the usage type and code directly. */ wacom_wac->has_mute_touch_switch = true; usage->type = EV_SW; usage->code = SW_MUTE_DEVICE; break; case WACOM_HID_WD_TOUCHSTRIP: wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_TOUCHSTRIP2: wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_TOUCHRING: if (field->flags & HID_MAIN_ITEM_RELATIVE) { wacom_wac->relring_count++; if (wacom_wac->relring_count == 1) { wacom_map_usage(input, usage, field, EV_REL, REL_WHEEL_HI_RES, 0); set_bit(REL_WHEEL, input->relbit); } else if (wacom_wac->relring_count == 2) { wacom_map_usage(input, usage, field, EV_REL, REL_HWHEEL_HI_RES, 0); set_bit(REL_HWHEEL, input->relbit); } } else { wacom_wac->absring_count++; if (wacom_wac->absring_count == 1) wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); else if (wacom_wac->absring_count == 2) wacom_map_usage(input, usage, field, EV_ABS, ABS_THROTTLE, 0); } features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_TOUCHRINGSTATUS: /* * Only set up type/code association. Completely mapping * this usage may overwrite the axis resolution and range. 
*/ usage->type = EV_ABS; usage->code = ABS_WHEEL; set_bit(EV_ABS, input->evbit); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_BUTTONCONFIG: wacom_map_usage(input, usage, field, EV_KEY, KEY_BUTTONCONFIG, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_ONSCREEN_KEYBOARD: wacom_map_usage(input, usage, field, EV_KEY, KEY_ONSCREEN_KEYBOARD, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_CONTROLPANEL: wacom_map_usage(input, usage, field, EV_KEY, KEY_CONTROLPANEL, 0); features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_MODE_CHANGE: /* do not overwrite previous data */ if (!wacom_wac->has_mode_change) { wacom_wac->has_mode_change = true; wacom_wac->is_direct_mode = true; } features->device_type |= WACOM_DEVICETYPE_PAD; break; } switch (equivalent_usage & 0xfffffff0) { case WACOM_HID_WD_EXPRESSKEY00: wacom_map_usage(input, usage, field, EV_KEY, wacom_numbered_button_to_key(features->numbered_buttons), 0); features->numbered_buttons++; features->device_type |= WACOM_DEVICETYPE_PAD; break; } } static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->pad_input; struct wacom_features *features = &wacom_wac->features; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); int i; bool do_report = false; /* * Avoid reporting this event and setting inrange_state if this usage * hasn't been mapped. */ if (!usage->type && equivalent_usage != WACOM_HID_WD_MODE_CHANGE) return; if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) { bool is_abs_touchring = usage->hid == WACOM_HID_WD_TOUCHRING && !(field->flags & HID_MAIN_ITEM_RELATIVE); if (!is_abs_touchring) wacom_wac->hid_data.inrange_state |= value; } /* Process touch switch state first since it is reported through the touch interface, * which is independent of the pad interface. In the case when there are no other pad * events, the pad interface will not even be created. */ if ((equivalent_usage == WACOM_HID_WD_MUTE_DEVICE) || (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)) { if (wacom_wac->shared->touch_input) { bool *is_touch_on = &wacom_wac->shared->is_touch_on; if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) *is_touch_on = !(*is_touch_on); else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) *is_touch_on = value; input_report_switch(wacom_wac->shared->touch_input, SW_MUTE_DEVICE, !(*is_touch_on)); input_sync(wacom_wac->shared->touch_input); } return; } if (!input) return; switch (equivalent_usage) { case WACOM_HID_WD_TOUCHRING: /* * Userspace expects touchrings to increase in value with * clockwise gestures and have their zero point at the * tablet's left. HID events "should" be clockwise- * increasing and zero at top, though the MobileStudio * Pro and 2nd-gen Intuos Pro don't do this...
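 * As a worked illustration with a hypothetical 0..71 ABS_WHEEL range:
 * wacom_offset_rotation() shifts the value by num/denom of a revolution and
 * wraps it back into range, so a 1/4-turn offset adds 72/4 = 18 positions and
 * a raw value of 60 becomes 60 + 18 - 72 = 6. In the relative branch below,
 * each detent is scaled to 120 REL_WHEEL_HI_RES units and one legacy
 * REL_WHEEL/REL_HWHEEL click is emitted for every 120 accumulated units.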
*/ if (hdev->vendor == 0x56a && (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ hdev->product == 0x392 || /* Intuos Pro 2 */ hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */ hdev->product == 0x3AA)) { /* MobileStudio Pro */ value = (field->logical_maximum - value); if (hdev->product == 0x357 || hdev->product == 0x358 || hdev->product == 0x392) value = wacom_offset_rotation(input, usage, value, 3, 16); else if (hdev->product == 0x34d || hdev->product == 0x34e || hdev->product == 0x398 || hdev->product == 0x399 || hdev->product == 0x3AA) value = wacom_offset_rotation(input, usage, value, 1, 2); } else if (field->flags & HID_MAIN_ITEM_RELATIVE) { int hires_value = value * 120 / usage->resolution_multiplier; int *ring_value; int lowres_code; if (usage->code == REL_WHEEL_HI_RES) { /* We must invert the sign for vertical * relative scrolling. Clockwise * rotation produces positive values * from HW, but userspace treats * positive REL_WHEEL as a scroll *up*! */ hires_value = -hires_value; ring_value = &wacom_wac->hid_data.ring_value; lowres_code = REL_WHEEL; } else if (usage->code == REL_HWHEEL_HI_RES) { /* No need to invert the sign for * horizontal relative scrolling. * Clockwise rotation produces positive * values from HW and userspace treats * positive REL_HWHEEL as a scroll * right. */ ring_value = &wacom_wac->hid_data.ring2_value; lowres_code = REL_HWHEEL; } else { hid_err(wacom->hdev, "unrecognized relative wheel with code %d\n", usage->code); break; } value = hires_value; *ring_value += hires_value; /* Emulate a legacy wheel click for every 120 * units of hi-res travel. */ if (*ring_value >= 120 || *ring_value <= -120) { int clicks = *ring_value / 120; input_event(input, usage->type, lowres_code, clicks); *ring_value -= clicks * 120; } } else { value = wacom_offset_rotation(input, usage, value, 1, 4); } do_report = true; break; case WACOM_HID_WD_TOUCHRINGSTATUS: if (!value) input_event(input, usage->type, usage->code, 0); break; case WACOM_HID_WD_MODE_CHANGE: if (wacom_wac->is_direct_mode != value) { wacom_wac->is_direct_mode = value; wacom_schedule_work(&wacom->wacom_wac, WACOM_WORKER_MODE_CHANGE); } break; case WACOM_HID_WD_BUTTONCENTER: for (i = 0; i < wacom->led.count; i++) wacom_update_led(wacom, features->numbered_buttons, value, i); fallthrough; default: do_report = true; break; } if (do_report) { input_event(input, usage->type, usage->code, value); if (value) wacom_wac->hid_data.pad_input_event_flag = true; } } static void wacom_wac_pad_pre_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; wacom_wac->hid_data.inrange_state = 0; } static void wacom_wac_pad_report(struct hid_device *hdev, struct hid_report *report, struct hid_field *field) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->pad_input; bool active = wacom_wac->hid_data.inrange_state != 0; /* report prox for expresskey events */ if (wacom_wac->hid_data.pad_input_event_flag) { input_event(input, EV_ABS, ABS_MISC, active ? 
PAD_DEVICE_ID : 0); input_sync(input); if (!active) wacom_wac->hid_data.pad_input_event_flag = false; } } static void wacom_set_barrel_switch3_usage(struct wacom_wac *wacom_wac) { struct input_dev *input = wacom_wac->pen_input; struct wacom_features *features = &wacom_wac->features; if (!(features->quirks & WACOM_QUIRK_AESPEN) && wacom_wac->hid_data.barrelswitch && wacom_wac->hid_data.barrelswitch2 && wacom_wac->hid_data.serialhi && !wacom_wac->hid_data.barrelswitch3) { input_set_capability(input, EV_KEY, BTN_STYLUS3); features->quirks |= WACOM_QUIRK_PEN_BUTTON3; } } static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; struct input_dev *input = wacom_wac->pen_input; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); switch (equivalent_usage) { case HID_GD_X: wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4); break; case HID_GD_Y: wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4); break; case WACOM_HID_WD_DISTANCE: case HID_GD_Z: wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0); break; case HID_DG_TIPPRESSURE: wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0); break; case HID_DG_INRANGE: wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0); break; case HID_DG_INVERT: wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_RUBBER, 0); break; case HID_DG_TILT_X: wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0); break; case HID_DG_TILT_Y: wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0); break; case HID_DG_TWIST: wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0); break; case HID_DG_ERASER: input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER); wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0); break; case HID_DG_TIPSWITCH: input_set_capability(input, EV_KEY, BTN_TOOL_PEN); wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0); break; case HID_DG_BARRELSWITCH: wacom_wac->hid_data.barrelswitch = true; wacom_set_barrel_switch3_usage(wacom_wac); wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS, 0); break; case HID_DG_BARRELSWITCH2: wacom_wac->hid_data.barrelswitch2 = true; wacom_set_barrel_switch3_usage(wacom_wac); wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0); break; case HID_DG_TOOLSERIALNUMBER: features->quirks |= WACOM_QUIRK_TOOLSERIAL; wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0); break; case HID_DG_SCANTIME: wacom_map_usage(input, usage, field, EV_MSC, MSC_TIMESTAMP, 0); break; case WACOM_HID_WD_SENSE: features->quirks |= WACOM_QUIRK_SENSE; wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0); break; case WACOM_HID_WD_SERIALHI: wacom_wac->hid_data.serialhi = true; wacom_set_barrel_switch3_usage(wacom_wac); wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0); break; case WACOM_HID_WD_FINGERWHEEL: input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); break; case WACOM_HID_WD_BARRELSWITCH3: wacom_wac->hid_data.barrelswitch3 = true; wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0); features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3; break; case WACOM_HID_WD_SEQUENCENUMBER: wacom_wac->hid_data.sequence_number = -1; break; } } static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct wacom *wacom = 
hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; struct input_dev *input = wacom_wac->pen_input; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); if (wacom_wac->is_invalid_bt_frame) return; switch (equivalent_usage) { case HID_GD_Z: /* * HID_GD_Z "should increase as the control's position is * moved from high to low", while ABS_DISTANCE instead * increases in value as the tool moves from low to high. */ value = field->logical_maximum - value; break; case HID_DG_INRANGE: mod_timer(&wacom->idleprox_timer, jiffies + msecs_to_jiffies(100)); wacom_wac->hid_data.inrange_state = value; if (!(features->quirks & WACOM_QUIRK_SENSE)) wacom_wac->hid_data.sense_state = value; return; case HID_DG_INVERT: wacom_wac->hid_data.eraser |= value; return; case HID_DG_ERASER: wacom_wac->hid_data.eraser |= value; fallthrough; case HID_DG_TIPSWITCH: wacom_wac->hid_data.tipswitch |= value; return; case HID_DG_BARRELSWITCH: wacom_wac->hid_data.barrelswitch = value; return; case HID_DG_BARRELSWITCH2: wacom_wac->hid_data.barrelswitch2 = value; return; case HID_DG_TOOLSERIALNUMBER: if (value) { wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size); } return; case HID_DG_TWIST: /* don't modify the value if the pen doesn't support the feature */ if (!wacom_is_art_pen(wacom_wac->id[0])) return; /* * Userspace expects pen twist to have its zero point when * the buttons/finger is on the tablet's left. HID values * are zero when buttons are toward the top. */ value = wacom_offset_rotation(input, usage, value, 1, 4); break; case WACOM_HID_WD_SENSE: wacom_wac->hid_data.sense_state = value; return; case WACOM_HID_WD_SERIALHI: if (value) { __u32 raw_value = wacom_s32tou(value, field->report_size); wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); wacom_wac->serial[0] |= ((__u64)raw_value) << 32; /* * Non-USI EMR devices may contain additional tool type * information here. See WACOM_HID_WD_TOOLTYPE case for * more details. */ if (value >> 20 == 1) { wacom_wac->id[0] |= raw_value & 0xFFFFF; } } return; case WACOM_HID_WD_TOOLTYPE: /* * Some devices (MobileStudio Pro, and possibly later * devices as well) do not return the complete tool * type in their WACOM_HID_WD_TOOLTYPE usage. 
Use a * bitwise OR so the complete value can be built * up over time :( */ wacom_wac->id[0] |= wacom_s32tou(value, field->report_size); return; case WACOM_HID_WD_OFFSETLEFT: if (features->offset_left && value != features->offset_left) hid_warn(hdev, "%s: overriding existing left offset " "%d -> %d\n", __func__, value, features->offset_left); features->offset_left = value; return; case WACOM_HID_WD_OFFSETRIGHT: if (features->offset_right && value != features->offset_right) hid_warn(hdev, "%s: overriding existing right offset " "%d -> %d\n", __func__, value, features->offset_right); features->offset_right = value; return; case WACOM_HID_WD_OFFSETTOP: if (features->offset_top && value != features->offset_top) hid_warn(hdev, "%s: overriding existing top offset " "%d -> %d\n", __func__, value, features->offset_top); features->offset_top = value; return; case WACOM_HID_WD_OFFSETBOTTOM: if (features->offset_bottom && value != features->offset_bottom) hid_warn(hdev, "%s: overriding existing bottom offset " "%d -> %d\n", __func__, value, features->offset_bottom); features->offset_bottom = value; return; case WACOM_HID_WD_REPORT_VALID: wacom_wac->is_invalid_bt_frame = !value; return; case WACOM_HID_WD_BARRELSWITCH3: wacom_wac->hid_data.barrelswitch3 = value; return; case WACOM_HID_WD_SEQUENCENUMBER: if (wacom_wac->hid_data.sequence_number != value && wacom_wac->hid_data.sequence_number >= 0) { int sequence_size = field->logical_maximum - field->logical_minimum + 1; int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size; hid_warn(hdev, "Dropped %d packets", drop_count); } wacom_wac->hid_data.sequence_number = value + 1; if (wacom_wac->hid_data.sequence_number > field->logical_maximum) wacom_wac->hid_data.sequence_number = field->logical_minimum; return; } /* send pen events only when touch is up or forced out * or touch arbitration is off */ if (!usage->type || delay_pen_events(wacom_wac)) return; /* send pen events only when the pen is in range */ if (wacom_wac->hid_data.inrange_state) input_event(input, usage->type, usage->code, value); else if (wacom_wac->shared->stylus_in_proximity && !wacom_wac->hid_data.sense_state) input_event(input, usage->type, usage->code, 0); } static void wacom_wac_pen_pre_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; wacom_wac->is_invalid_bt_frame = false; return; } static void wacom_wac_pen_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->pen_input; bool range = wacom_wac->hid_data.inrange_state; bool sense = wacom_wac->hid_data.sense_state; bool entering_range = !wacom_wac->tool[0] && range; if (wacom_wac->is_invalid_bt_frame) return; if (entering_range) { /* first in range */ /* Going into range select tool */ if (wacom_wac->hid_data.eraser) wacom_wac->tool[0] = BTN_TOOL_RUBBER; else if (wacom_wac->features.quirks & WACOM_QUIRK_AESPEN) wacom_wac->tool[0] = BTN_TOOL_PEN; else if (wacom_wac->id[0]) wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]); else wacom_wac->tool[0] = BTN_TOOL_PEN; } /* keep pen state for touch events */ wacom_wac->shared->stylus_in_proximity = sense; if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) { int id = wacom_wac->id[0]; if (wacom_wac->features.quirks & WACOM_QUIRK_PEN_BUTTON3) { int sw_state = wacom_wac->hid_data.barrelswitch | 
(wacom_wac->hid_data.barrelswitch2 << 1); wacom_wac->hid_data.barrelswitch = sw_state == 1; wacom_wac->hid_data.barrelswitch2 = sw_state == 2; wacom_wac->hid_data.barrelswitch3 = sw_state == 3; } input_report_key(input, BTN_STYLUS, wacom_wac->hid_data.barrelswitch); input_report_key(input, BTN_STYLUS2, wacom_wac->hid_data.barrelswitch2); input_report_key(input, BTN_STYLUS3, wacom_wac->hid_data.barrelswitch3); /* * Non-USI EMR tools should have their IDs mangled to * match the legacy behavior of wacom_intuos_general */ if (wacom_wac->serial[0] >> 52 == 1) id = wacom_intuos_id_mangle(id); /* * To ensure compatibility with xf86-input-wacom, we should * report the BTN_TOOL_* event prior to the ABS_MISC or * MSC_SERIAL events. */ input_report_key(input, BTN_TOUCH, wacom_wac->hid_data.tipswitch); input_report_key(input, wacom_wac->tool[0], sense); if (wacom_wac->serial[0]) { /* * xf86-input-wacom does not accept a serial number * of '0'. Report the low 32 bits if possible, but * if they are zero, report the upper ones instead. */ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu; __u32 serial_hi = wacom_wac->serial[0] >> 32; input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi)); input_report_abs(input, ABS_MISC, sense ? id : 0); } wacom_wac->hid_data.tipswitch = false; wacom_wac->hid_data.eraser = false; input_sync(input); } /* Handle AES battery timeout behavior */ if (wacom_wac->features.quirks & WACOM_QUIRK_AESPEN) { if (entering_range) cancel_delayed_work(&wacom->aes_battery_work); if (!sense) schedule_delayed_work(&wacom->aes_battery_work, msecs_to_jiffies(WACOM_AES_BATTERY_TIMEOUT)); } if (!sense) { wacom_wac->tool[0] = 0; wacom_wac->id[0] = 0; wacom_wac->serial[0] = 0; } } static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->touch_input; unsigned touch_max = wacom_wac->features.touch_max; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); switch (equivalent_usage) { case HID_GD_X: if (touch_max == 1) wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4); else wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_POSITION_X, 4); break; case HID_GD_Y: if (touch_max == 1) wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4); else wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_POSITION_Y, 4); break; case HID_DG_WIDTH: case HID_DG_HEIGHT: wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MAJOR, 0); wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MINOR, 0); input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0); break; case HID_DG_TIPSWITCH: wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0); break; case HID_DG_CONTACTCOUNT: wacom_wac->hid_data.cc_report = field->report->id; wacom_wac->hid_data.cc_index = field->index; wacom_wac->hid_data.cc_value_index = usage->usage_index; break; case HID_DG_CONTACTID: if ((field->logical_maximum - field->logical_minimum) < touch_max) { /* * The HID descriptor for G11 sensors leaves logical * maximum set to '1' despite it being a multitouch * device. Override to a sensible number. 
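 * Without this override, wacom_wac_event() would silently ignore any
 * CONTACTID value above the declared logical maximum (out-of-range values
 * are dropped there), so contacts with higher IDs would never be reported.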
*/ field->logical_maximum = 255; } break; case HID_DG_SCANTIME: wacom_map_usage(input, usage, field, EV_MSC, MSC_TIMESTAMP, 0); break; } } static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, struct input_dev *input) { struct hid_data *hid_data = &wacom_wac->hid_data; bool mt = wacom_wac->features.touch_max > 1; bool touch_down = hid_data->tipswitch && hid_data->confidence; bool prox = touch_down && report_touch_events(wacom_wac); if (touch_is_muted(wacom_wac)) { if (!wacom_wac->shared->touch_down) return; prox = false; } wacom_wac->hid_data.num_received++; if (wacom_wac->hid_data.num_received > wacom_wac->hid_data.num_expected) return; if (mt) { int slot; slot = input_mt_get_slot_by_key(input, hid_data->id); if (slot < 0) { return; } else { struct input_mt_slot *ps = &input->mt->slots[slot]; int mt_id = input_mt_get_value(ps, ABS_MT_TRACKING_ID); if (!prox && mt_id < 0) { // No data to send for this slot; short-circuit return; } } input_mt_slot(input, slot); input_mt_report_slot_state(input, MT_TOOL_FINGER, prox); } else { input_report_key(input, BTN_TOUCH, prox); } if (prox) { input_report_abs(input, mt ? ABS_MT_POSITION_X : ABS_X, hid_data->x); input_report_abs(input, mt ? ABS_MT_POSITION_Y : ABS_Y, hid_data->y); if (test_bit(ABS_MT_TOUCH_MAJOR, input->absbit)) { input_report_abs(input, ABS_MT_TOUCH_MAJOR, max(hid_data->width, hid_data->height)); input_report_abs(input, ABS_MT_TOUCH_MINOR, min(hid_data->width, hid_data->height)); if (hid_data->width != hid_data->height) input_report_abs(input, ABS_MT_ORIENTATION, hid_data->width <= hid_data->height ? 0 : 1); } } } static void wacom_wac_finger_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); struct wacom_features *features = &wacom->wacom_wac.features; if (touch_is_muted(wacom_wac) && !wacom_wac->shared->touch_down) return; if (wacom_wac->is_invalid_bt_frame) return; switch (equivalent_usage) { case HID_DG_CONFIDENCE: wacom_wac->hid_data.confidence = value; break; case HID_GD_X: wacom_wac->hid_data.x = value; break; case HID_GD_Y: wacom_wac->hid_data.y = value; break; case HID_DG_WIDTH: wacom_wac->hid_data.width = value; break; case HID_DG_HEIGHT: wacom_wac->hid_data.height = value; break; case HID_DG_CONTACTID: wacom_wac->hid_data.id = value; break; case HID_DG_TIPSWITCH: wacom_wac->hid_data.tipswitch = value; break; case WACOM_HID_WT_REPORT_VALID: wacom_wac->is_invalid_bt_frame = !value; return; case HID_DG_CONTACTMAX: if (!features->touch_max) { features->touch_max = value; } else { hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max " "%d -> %d\n", __func__, features->touch_max, value); } return; } if (usage->usage_index + 1 == field->report_count) { if (equivalent_usage == wacom_wac->hid_data.last_slot_field) wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); } } static void wacom_wac_finger_pre_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct hid_data* hid_data = &wacom_wac->hid_data; int i; if (touch_is_muted(wacom_wac) && !wacom_wac->shared->touch_down) return; wacom_wac->is_invalid_bt_frame = false; hid_data->confidence = true; hid_data->cc_report = 0; hid_data->cc_index = -1; hid_data->cc_value_index = -1; for (i = 0; i < report->maxfield; i++) { struct hid_field *field = 
report->field[i]; int j; for (j = 0; j < field->maxusage; j++) { struct hid_usage *usage = &field->usage[j]; unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); switch (equivalent_usage) { case HID_GD_X: case HID_GD_Y: case HID_DG_WIDTH: case HID_DG_HEIGHT: case HID_DG_CONTACTID: case HID_DG_INRANGE: case HID_DG_INVERT: case HID_DG_TIPSWITCH: hid_data->last_slot_field = equivalent_usage; break; case HID_DG_CONTACTCOUNT: hid_data->cc_report = report->id; hid_data->cc_index = i; hid_data->cc_value_index = j; break; } } } if (hid_data->cc_report != 0 && hid_data->cc_index >= 0) { struct hid_field *field = report->field[hid_data->cc_index]; int value = field->value[hid_data->cc_value_index]; if (value) { hid_data->num_expected = value; hid_data->num_received = 0; } } else { hid_data->num_expected = wacom_wac->features.touch_max; hid_data->num_received = 0; } } static void wacom_wac_finger_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->touch_input; unsigned touch_max = wacom_wac->features.touch_max; /* if there was nothing to process, don't send an empty sync */ if (wacom_wac->hid_data.num_expected == 0) return; /* If more packets of data are expected, give us a chance to * process them rather than immediately syncing a partial * update. */ if (wacom_wac->hid_data.num_received < wacom_wac->hid_data.num_expected) return; if (touch_max > 1) input_mt_sync_frame(input); input_sync(input); wacom_wac->hid_data.num_received = 0; wacom_wac->hid_data.num_expected = 0; /* keep touch state for pen event */ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); } void wacom_wac_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; if (WACOM_DIRECT_DEVICE(field)) features->device_type |= WACOM_DEVICETYPE_DIRECT; /* usage tests must precede field tests */ if (WACOM_BATTERY_USAGE(usage)) wacom_wac_battery_usage_mapping(hdev, field, usage); else if (WACOM_PAD_FIELD(field)) wacom_wac_pad_usage_mapping(hdev, field, usage); else if (WACOM_PEN_FIELD(field)) wacom_wac_pen_usage_mapping(hdev, field, usage); else if (WACOM_FINGER_FIELD(field)) wacom_wac_finger_usage_mapping(hdev, field, usage); } void wacom_wac_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct wacom *wacom = hid_get_drvdata(hdev); if (wacom->wacom_wac.features.type != HID_GENERIC) return; if (value > field->logical_maximum || value < field->logical_minimum) return; /* usage tests must precede field tests */ if (WACOM_BATTERY_USAGE(usage)) wacom_wac_battery_event(hdev, field, usage, value); else if (WACOM_PAD_FIELD(field)) wacom_wac_pad_event(hdev, field, usage, value); else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) wacom_wac_pen_event(hdev, field, usage, value); else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input) wacom_wac_finger_event(hdev, field, usage, value); } static void wacom_report_events(struct hid_device *hdev, struct hid_report *report, int collection_index, int field_index) { int r; for (r = field_index; r < report->maxfield; r++) { struct hid_field *field; unsigned count, n; field = report->field[r]; count = field->report_count; if (!(HID_MAIN_ITEM_VARIABLE & field->flags)) continue; for (n = 
0 ; n < count; n++) { if (field->usage[n].collection_index == collection_index) wacom_wac_event(hdev, field, &field->usage[n], field->value[n]); else return; } } } static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report, int collection_index, struct hid_field *field, int field_index) { struct wacom *wacom = hid_get_drvdata(hdev); wacom_report_events(hdev, report, collection_index, field_index); /* * Non-input reports may be sent prior to the device being * completely initialized. Since only their events need * to be processed, exit after 'wacom_report_events' has * been called to prevent potential crashes in the report- * processing functions. */ if (report->type != HID_INPUT_REPORT) return -1; if (WACOM_PAD_FIELD(field)) return 0; else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) wacom_wac_pen_report(hdev, report); else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input) wacom_wac_finger_report(hdev, report); return 0; } void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct hid_field *field; bool pad_in_hid_field = false, pen_in_hid_field = false, finger_in_hid_field = false, true_pad = false; int r; int prev_collection = -1; if (wacom_wac->features.type != HID_GENERIC) return; for (r = 0; r < report->maxfield; r++) { field = report->field[r]; if (WACOM_PAD_FIELD(field)) pad_in_hid_field = true; if (WACOM_PEN_FIELD(field)) pen_in_hid_field = true; if (WACOM_FINGER_FIELD(field)) finger_in_hid_field = true; if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) true_pad = true; } wacom_wac_battery_pre_report(hdev, report); if (pad_in_hid_field && wacom_wac->pad_input) wacom_wac_pad_pre_report(hdev, report); if (pen_in_hid_field && wacom_wac->pen_input) wacom_wac_pen_pre_report(hdev, report); if (finger_in_hid_field && wacom_wac->touch_input) wacom_wac_finger_pre_report(hdev, report); for (r = 0; r < report->maxfield; r++) { field = report->field[r]; if (field->usage[0].collection_index != prev_collection) { if (wacom_wac_collection(hdev, report, field->usage[0].collection_index, field, r) < 0) return; prev_collection = field->usage[0].collection_index; } } wacom_wac_battery_report(hdev, report); if (true_pad && wacom_wac->pad_input) wacom_wac_pad_report(hdev, report, field); } static int wacom_bpt_touch(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->touch_input; struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int i; if (data[0] != 0x02) return 0; for (i = 0; i < 2; i++) { int offset = (data[1] & 0x80) ? 
(8 * i) : (9 * i); bool touch = report_touch_events(wacom) && (data[offset + 3] & 0x80); input_mt_slot(input, i); input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff; if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { x <<= 5; y <<= 5; } input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); } } input_mt_sync_frame(input); input_report_key(pad_input, BTN_LEFT, (data[1] & 0x08) != 0); input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); return 1; } static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) { struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->touch_input; bool touch = data[1] & 0x80; int slot = input_mt_get_slot_by_key(input, data[0]); if (slot < 0) return; touch = touch && report_touch_events(wacom); input_mt_slot(input, slot); input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { int x = (data[2] << 4) | (data[4] >> 4); int y = (data[3] << 4) | (data[4] & 0x0f); int width, height; if (features->type >= INTUOSPS && features->type <= INTUOSHT2) { width = data[5] * 100; height = data[6] * 100; } else { /* * "a" is a scaled-down area which we assume is * roughly circular and which can be described as: * a=(pi*r^2)/C. */ int a = data[5]; int x_res = input_abs_get_res(input, ABS_MT_POSITION_X); int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y); width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); height = width * y_res / x_res; } input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); input_report_abs(input, ABS_MT_TOUCH_MINOR, height); } } static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) { struct input_dev *input = wacom->pad_input; struct wacom_features *features = &wacom->features; if (features->type == INTUOSHT || features->type == INTUOSHT2) { input_report_key(input, BTN_LEFT, (data[1] & 0x02) != 0); input_report_key(input, BTN_BACK, (data[1] & 0x08) != 0); } else { input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0); input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0); } input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0); input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0); } static int wacom_bpt3_touch(struct wacom_wac *wacom) { unsigned char *data = wacom->data; int count = data[1] & 0x07; int touch_changed = 0, i; if (data[0] != 0x02) return 0; /* data has up to 7 fixed sized 8-byte messages starting at data[2] */ for (i = 0; i < count; i++) { int offset = (8 * i) + 2; int msg_id = data[offset]; if (msg_id >= 2 && msg_id <= 17) { wacom_bpt3_touch_msg(wacom, data + offset); touch_changed++; } else if (msg_id == 128) wacom_bpt3_button_msg(wacom, data + offset); } /* only update touch if we actually have a touchpad and touch data changed */ if (wacom->touch_input && touch_changed) { input_mt_sync_frame(wacom->touch_input); wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); } return 1; } static int wacom_bpt_pen(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->pen_input; unsigned char *data = wacom->data; int x = 
0, y = 0, p = 0, d = 0; bool pen = false, btn1 = false, btn2 = false; bool range, prox, rdy; if (data[0] != WACOM_REPORT_PENABLED) return 0; range = (data[1] & 0x80) == 0x80; prox = (data[1] & 0x40) == 0x40; rdy = (data[1] & 0x20) == 0x20; wacom->shared->stylus_in_proximity = range; if (delay_pen_events(wacom)) return 0; if (rdy) { p = le16_to_cpup((__le16 *)&data[6]); pen = data[1] & 0x01; btn1 = data[1] & 0x02; btn2 = data[1] & 0x04; } if (prox) { x = le16_to_cpup((__le16 *)&data[2]); y = le16_to_cpup((__le16 *)&data[4]); if (data[1] & 0x08) { wacom->tool[0] = BTN_TOOL_RUBBER; wacom->id[0] = ERASER_DEVICE_ID; } else { wacom->tool[0] = BTN_TOOL_PEN; wacom->id[0] = STYLUS_DEVICE_ID; } wacom->reporting_data = true; } if (range) { /* * Convert distance from out of prox to distance from the tablet. * The raw distance can exceed distance_max once the pen is * touching and applying pressure; do not report a negative * distance. */ if (data[8] <= features->distance_max) d = features->distance_max - data[8]; } else { wacom->id[0] = 0; } if (wacom->reporting_data) { input_report_key(input, BTN_TOUCH, pen); input_report_key(input, BTN_STYLUS, btn1); input_report_key(input, BTN_STYLUS2, btn2); if (prox || !range) { input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); } input_report_abs(input, ABS_PRESSURE, p); input_report_abs(input, ABS_DISTANCE, d); input_report_key(input, wacom->tool[0], range); /* PEN or RUBBER */ input_report_abs(input, ABS_MISC, wacom->id[0]); /* TOOL ID */ } if (!range) { wacom->reporting_data = false; } return 1; } static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len) { struct wacom_features *features = &wacom->features; if ((features->type == INTUOSHT2) && (features->device_type & WACOM_DEVICETYPE_PEN)) return wacom_intuos_irq(wacom); else if (len == WACOM_PKGLEN_BBTOUCH) return wacom_bpt_touch(wacom); else if (len == WACOM_PKGLEN_BBTOUCH3) return wacom_bpt3_touch(wacom); else if (len == WACOM_PKGLEN_BBFUN || len == WACOM_PKGLEN_BBPEN) return wacom_bpt_pen(wacom); return 0; } static void wacom_bamboo_pad_pen_event(struct wacom_wac *wacom, unsigned char *data) { unsigned char prefix; /* * We need to reroute the event from the debug interface to the * pen interface. * We need to add the report ID to the actual pen report, so we * temporarily overwrite the first byte to prevent having to kzalloc/kfree * and memcpy the report. */ prefix = data[0]; data[0] = WACOM_REPORT_BPAD_PEN; /* * Actually reroute the event. * No need to check if wacom->shared->pen is valid, hid_input_report() * will check for us.
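 * The original first byte is restored (data[0] = prefix) as soon as
 * hid_input_report() returns, so the caller's buffer is left unmodified.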
*/ hid_input_report(wacom->shared->pen, HID_INPUT_REPORT, data, WACOM_PKGLEN_PENABLED, 1); data[0] = prefix; } static int wacom_bamboo_pad_touch_event(struct wacom_wac *wacom, unsigned char *data) { struct input_dev *input = wacom->touch_input; unsigned char *finger_data, prefix; unsigned id; int x, y; bool valid; prefix = data[0]; for (id = 0; id < wacom->features.touch_max; id++) { valid = !!(prefix & BIT(id)) && report_touch_events(wacom); input_mt_slot(input, id); input_mt_report_slot_state(input, MT_TOOL_FINGER, valid); if (!valid) continue; finger_data = data + 1 + id * 3; x = finger_data[0] | ((finger_data[1] & 0x0f) << 8); y = (finger_data[2] << 4) | (finger_data[1] >> 4); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); } input_mt_sync_frame(input); input_report_key(input, BTN_LEFT, prefix & 0x40); input_report_key(input, BTN_RIGHT, prefix & 0x80); /* keep touch state for pen event */ wacom->shared->touch_down = !!prefix && report_touch_events(wacom); return 1; } static int wacom_bamboo_pad_irq(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; if (!((len == WACOM_PKGLEN_BPAD_TOUCH) || (len == WACOM_PKGLEN_BPAD_TOUCH_USB)) || (data[0] != WACOM_REPORT_BPAD_TOUCH)) return 0; if (data[1] & 0x01) wacom_bamboo_pad_pen_event(wacom, &data[1]); if (data[1] & 0x02) return wacom_bamboo_pad_touch_event(wacom, &data[9]); return 0; } static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; int connected; if (len != WACOM_PKGLEN_WIRELESS || data[0] != WACOM_REPORT_WL) return 0; connected = data[1] & 0x01; if (connected) { int pid, battery, charging; if ((wacom->shared->type == INTUOSHT || wacom->shared->type == INTUOSHT2) && wacom->shared->touch_input && wacom->shared->touch_max) { input_report_switch(wacom->shared->touch_input, SW_MUTE_DEVICE, data[5] & 0x40); input_sync(wacom->shared->touch_input); } pid = get_unaligned_be16(&data[6]); battery = (data[5] & 0x3f) * 100 / 31; charging = !!(data[5] & 0x80); if (wacom->pid != pid) { wacom->pid = pid; wacom_schedule_work(wacom, WACOM_WORKER_WIRELESS); } wacom_notify_battery(wacom, WACOM_POWER_SUPPLY_STATUS_AUTO, battery, charging, 1, 0); } else if (wacom->pid != 0) { /* disconnected while previously connected */ wacom->pid = 0; wacom_schedule_work(wacom, WACOM_WORKER_WIRELESS); wacom_notify_battery(wacom, POWER_SUPPLY_STATUS_UNKNOWN, 0, 0, 0, 0); } return 0; } static int wacom_status_irq(struct wacom_wac *wacom_wac, size_t len) { struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac); struct wacom_features *features = &wacom_wac->features; unsigned char *data = wacom_wac->data; if (data[0] != WACOM_REPORT_USB) return 0; if ((features->type == INTUOSHT || features->type == INTUOSHT2) && wacom_wac->shared->touch_input && features->touch_max) { input_report_switch(wacom_wac->shared->touch_input, SW_MUTE_DEVICE, data[8] & 0x40); input_sync(wacom_wac->shared->touch_input); } if (data[9] & 0x02) { /* wireless module is attached */ int battery = (data[8] & 0x3f) * 100 / 31; bool charging = !!(data[8] & 0x80); features->quirks |= WACOM_QUIRK_BATTERY; wacom_notify_battery(wacom_wac, WACOM_POWER_SUPPLY_STATUS_AUTO, battery, charging, battery || charging, 1); } else if ((features->quirks & WACOM_QUIRK_BATTERY) && wacom->battery.battery) { features->quirks &= ~WACOM_QUIRK_BATTERY; wacom_notify_battery(wacom_wac, POWER_SUPPLY_STATUS_UNKNOWN, 0, 0, 0, 0); } return 0; } void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) { 
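/*
 * Dispatch on the tablet protocol: each handler returns nonzero when it
 * updated input state, and the code below then syncs whichever of the
 * pen, touch and pad input devices exist for this tablet.
 */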
bool sync; switch (wacom_wac->features.type) { case PENPARTNER: sync = wacom_penpartner_irq(wacom_wac); break; case PL: sync = wacom_pl_irq(wacom_wac); break; case WACOM_G4: case GRAPHIRE: case GRAPHIRE_BT: case WACOM_MO: sync = wacom_graphire_irq(wacom_wac); break; case PTU: sync = wacom_ptu_irq(wacom_wac); break; case DTU: sync = wacom_dtu_irq(wacom_wac); break; case DTUS: case DTUSX: sync = wacom_dtus_irq(wacom_wac); break; case INTUOS: case INTUOS3S: case INTUOS3: case INTUOS3L: case INTUOS4S: case INTUOS4: case INTUOS4L: case CINTIQ: case WACOM_BEE: case WACOM_13HD: case WACOM_21UX2: case WACOM_22HD: case WACOM_24HD: case WACOM_27QHD: case DTK: case CINTIQ_HYBRID: case CINTIQ_COMPANION_2: sync = wacom_intuos_irq(wacom_wac); break; case INTUOS4WL: sync = wacom_intuos_bt_irq(wacom_wac, len); break; case WACOM_24HDT: case WACOM_27QHDT: sync = wacom_24hdt_irq(wacom_wac); break; case INTUOS5S: case INTUOS5: case INTUOS5L: case INTUOSPS: case INTUOSPM: case INTUOSPL: if (len == WACOM_PKGLEN_BBTOUCH3) sync = wacom_bpt3_touch(wacom_wac); else if (wacom_wac->data[0] == WACOM_REPORT_USB) sync = wacom_status_irq(wacom_wac, len); else sync = wacom_intuos_irq(wacom_wac); break; case INTUOSP2_BT: case INTUOSP2S_BT: case INTUOSHT3_BT: sync = wacom_intuos_pro2_bt_irq(wacom_wac, len); break; case TABLETPC: case TABLETPCE: case TABLETPC2FG: case MTSCREEN: case MTTPC: case MTTPC_B: sync = wacom_tpc_irq(wacom_wac, len); break; case BAMBOO_PT: case BAMBOO_PEN: case BAMBOO_TOUCH: case INTUOSHT: case INTUOSHT2: if (wacom_wac->data[0] == WACOM_REPORT_USB) sync = wacom_status_irq(wacom_wac, len); else sync = wacom_bpt_irq(wacom_wac, len); break; case BAMBOO_PAD: sync = wacom_bamboo_pad_irq(wacom_wac, len); break; case WIRELESS: sync = wacom_wireless_irq(wacom_wac, len); break; case REMOTE: sync = false; if (wacom_wac->data[0] == WACOM_REPORT_DEVICE_LIST) wacom_remote_status_irq(wacom_wac, len); else sync = wacom_remote_irq(wacom_wac, len); break; default: sync = false; break; } if (sync) { if (wacom_wac->pen_input) input_sync(wacom_wac->pen_input); if (wacom_wac->touch_input) input_sync(wacom_wac->touch_input); if (wacom_wac->pad_input) input_sync(wacom_wac->pad_input); } } static void wacom_setup_basic_pro_pen(struct wacom_wac *wacom_wac) { struct input_dev *input_dev = wacom_wac->pen_input; input_set_capability(input_dev, EV_MSC, MSC_SERIAL); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); input_set_abs_params(input_dev, ABS_DISTANCE, 0, wacom_wac->features.distance_max, wacom_wac->features.distance_fuzz, 0); } static void wacom_setup_cintiq(struct wacom_wac *wacom_wac) { struct input_dev *input_dev = wacom_wac->pen_input; struct wacom_features *features = &wacom_wac->features; wacom_setup_basic_pro_pen(wacom_wac); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_TOOL_BRUSH, input_dev->keybit); __set_bit(BTN_TOOL_PENCIL, input_dev->keybit); __set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit); input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0); input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, features->tilt_fuzz, 0); input_abs_set_res(input_dev, ABS_TILT_X, 57); input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, features->tilt_fuzz, 0); input_abs_set_res(input_dev, ABS_TILT_Y, 57); } static void wacom_setup_intuos(struct wacom_wac *wacom_wac) { struct input_dev *input_dev = wacom_wac->pen_input; input_set_capability(input_dev, EV_REL, REL_WHEEL); wacom_setup_cintiq(wacom_wac); __set_bit(BTN_LEFT, 
input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); __set_bit(BTN_SIDE, input_dev->keybit); __set_bit(BTN_EXTRA, input_dev->keybit); __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); __set_bit(BTN_TOOL_LENS, input_dev->keybit); input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0); input_abs_set_res(input_dev, ABS_RZ, 287); input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0); } void wacom_setup_device_quirks(struct wacom *wacom) { struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom->wacom_wac.features; /* The pen and pad share the same interface on most devices */ if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 || features->type == DTUS || (features->type >= INTUOS3S && features->type <= WACOM_MO)) { if (features->device_type & WACOM_DEVICETYPE_PEN) features->device_type |= WACOM_DEVICETYPE_PAD; } /* Touch device found but its size is not defined; use defaults */ if (features->device_type & WACOM_DEVICETYPE_TOUCH && !features->x_max) { features->x_max = 1023; features->y_max = 1023; } /* * Intuos5/Pro and Bamboo 3rd gen have no useful data about their * touch interface in their HID descriptors. If this is the touch * interface (PacketSize of WACOM_PKGLEN_BBTOUCH3), override the * tablet values. */ if ((features->type >= INTUOS5S && features->type <= INTUOSPL) || (features->type >= INTUOSHT && features->type <= BAMBOO_PT)) { if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { if (features->touch_max) features->device_type |= WACOM_DEVICETYPE_TOUCH; if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) features->device_type |= WACOM_DEVICETYPE_PAD; if (features->type == INTUOSHT2) { features->x_max = features->x_max / 10; features->y_max = features->y_max / 10; } else { features->x_max = 4096; features->y_max = 4096; } } else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { features->device_type |= WACOM_DEVICETYPE_PAD; } } /* * Hack for the Bamboo One: * the device presents a PAD/Touch interface like most Bamboos and even * sends ghost PAD data on it. However, later, we must disable this * ghost interface, and we cannot detect it unless we set it here * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.  */ if (features->type == BAMBOO_PEN && features->pktlen == WACOM_PKGLEN_BBTOUCH3) features->device_type |= WACOM_DEVICETYPE_PAD; /* * Raw Wacom-mode pen and touch events both come from interface * 0, whose HID descriptor has an application usage of 0xFF0D * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back * out through the HID_GENERIC device created for interface 1, * so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/ if (features->type == BAMBOO_PAD) features->device_type = WACOM_DEVICETYPE_TOUCH; if (features->type == REMOTE) features->device_type = WACOM_DEVICETYPE_PAD; if (features->type == INTUOSP2_BT || features->type == INTUOSP2S_BT) { features->device_type |= WACOM_DEVICETYPE_PEN | WACOM_DEVICETYPE_PAD | WACOM_DEVICETYPE_TOUCH; features->quirks |= WACOM_QUIRK_BATTERY; } if (features->type == INTUOSHT3_BT) { features->device_type |= WACOM_DEVICETYPE_PEN | WACOM_DEVICETYPE_PAD; features->quirks |= WACOM_QUIRK_BATTERY; } switch (features->type) { case PL: case DTU: case DTUS: case DTUSX: case WACOM_21UX2: case WACOM_22HD: case DTK: case WACOM_24HD: case WACOM_27QHD: case CINTIQ_HYBRID: case CINTIQ_COMPANION_2: case CINTIQ: case WACOM_BEE: case WACOM_13HD: case WACOM_24HDT: case WACOM_27QHDT: case TABLETPC: case TABLETPCE: case TABLETPC2FG: case MTSCREEN: case MTTPC: case MTTPC_B: features->device_type |= WACOM_DEVICETYPE_DIRECT; break; } if (wacom->hdev->bus == BUS_BLUETOOTH) features->quirks |= WACOM_QUIRK_BATTERY; /* quirk for bamboo touch with 2 low res touches */ if ((features->type == BAMBOO_PT || features->type == BAMBOO_TOUCH) && features->pktlen == WACOM_PKGLEN_BBTOUCH) { features->x_max <<= 5; features->y_max <<= 5; features->x_fuzz <<= 5; features->y_fuzz <<= 5; features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES; } if (features->type == WIRELESS) { if (features->device_type == WACOM_DEVICETYPE_WL_MONITOR) { features->quirks |= WACOM_QUIRK_BATTERY; } } if (features->type == REMOTE) features->device_type |= WACOM_DEVICETYPE_WL_MONITOR; /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots * of things it shouldn't. Lets fix up the damage... */ if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) { features->quirks &= ~WACOM_QUIRK_TOOLSERIAL; __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit); __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit); __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit); __clear_bit(ABS_Z, wacom_wac->pen_input->absbit); __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit); __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit); __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit); __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit); __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit); __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit); __clear_bit(EV_MSC, wacom_wac->pen_input->evbit); } } int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; if (!(features->device_type & WACOM_DEVICETYPE_PEN)) return -ENODEV; if (features->device_type & WACOM_DEVICETYPE_DIRECT) __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); else __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->type == HID_GENERIC) /* setup has already been done */ return 0; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(ABS_MISC, input_dev->absbit); input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left, features->x_max - features->offset_right, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top, features->y_max - features->offset_bottom, features->y_fuzz, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, features->pressure_fuzz, 0); /* penabled devices have fixed resolution for each model */ input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 
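/*
 * The resolution programmed above is in units per millimeter, so the axis
 * range maps directly to physical size; e.g. a hypothetical x_max of 21600
 * at an x_resolution of 100 units/mm describes a 216 mm wide active area.
 */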
switch (features->type) { case GRAPHIRE_BT: __clear_bit(ABS_MISC, input_dev->absbit); fallthrough; case WACOM_MO: case WACOM_G4: input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, features->distance_fuzz, 0); fallthrough; case GRAPHIRE: input_set_capability(input_dev, EV_REL, REL_WHEEL); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); break; case WACOM_27QHD: case WACOM_24HD: case DTK: case WACOM_22HD: case WACOM_21UX2: case WACOM_BEE: case CINTIQ: case WACOM_13HD: case CINTIQ_HYBRID: case CINTIQ_COMPANION_2: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_abs_set_res(input_dev, ABS_Z, 287); wacom_setup_cintiq(wacom_wac); break; case INTUOS3: case INTUOS3L: case INTUOS3S: case INTUOS4: case INTUOS4WL: case INTUOS4L: case INTUOS4S: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_abs_set_res(input_dev, ABS_Z, 287); fallthrough; case INTUOS: wacom_setup_intuos(wacom_wac); break; case INTUOS5: case INTUOS5L: case INTUOSPM: case INTUOSPL: case INTUOS5S: case INTUOSPS: case INTUOSP2_BT: case INTUOSP2S_BT: input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, features->distance_fuzz, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_abs_set_res(input_dev, ABS_Z, 287); wacom_setup_intuos(wacom_wac); break; case WACOM_24HDT: case WACOM_27QHDT: case MTSCREEN: case MTTPC: case MTTPC_B: case TABLETPC2FG: case TABLETPC: case TABLETPCE: __clear_bit(ABS_MISC, input_dev->absbit); fallthrough; case DTUS: case DTUSX: case PL: case DTU: __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); break; case PTU: __set_bit(BTN_STYLUS2, input_dev->keybit); fallthrough; case PENPARTNER: __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); break; case INTUOSHT: case BAMBOO_PT: case BAMBOO_PEN: case INTUOSHT2: case INTUOSHT3_BT: if (features->type == INTUOSHT2 || features->type == INTUOSHT3_BT) { wacom_setup_basic_pro_pen(wacom_wac); } else { __clear_bit(ABS_MISC, input_dev->absbit); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, features->distance_fuzz, 0); } break; case BAMBOO_PAD: __clear_bit(ABS_MISC, input_dev->absbit); break; } return 0; } int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; if (!(features->device_type & WACOM_DEVICETYPE_TOUCH)) return -ENODEV; if (features->device_type & WACOM_DEVICETYPE_DIRECT) __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); else __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->type == HID_GENERIC) /* setup has already been done */ return 0; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); if (features->touch_max == 1) { input_set_abs_params(input_dev, ABS_X, 0, features->x_max, features->x_fuzz, 0); input_set_abs_params(input_dev, 
ABS_Y, 0, features->y_max, features->y_fuzz, 0); input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); } else if (features->touch_max > 1) { input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, features->x_max, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, features->y_max, features->y_fuzz, 0); input_abs_set_res(input_dev, ABS_MT_POSITION_X, features->x_resolution); input_abs_set_res(input_dev, ABS_MT_POSITION_Y, features->y_resolution); } switch (features->type) { case INTUOSP2_BT: case INTUOSP2S_BT: input_dev->evbit[0] |= BIT_MASK(EV_SW); __set_bit(SW_MUTE_DEVICE, input_dev->swbit); if (wacom_wac->shared->touch->product == 0x361) { input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, 12440, 4, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, 8640, 4, 0); } else if (wacom_wac->shared->touch->product == 0x360) { input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, 8960, 4, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, 5920, 4, 0); } else if (wacom_wac->shared->touch->product == 0x393) { input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, 6400, 4, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, 4000, 4, 0); } input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40); input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40); fallthrough; case INTUOS5: case INTUOS5L: case INTUOSPM: case INTUOSPL: case INTUOS5S: case INTUOSPS: input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0); input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); break; case WACOM_24HDT: input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); fallthrough; case WACOM_27QHDT: if (wacom_wac->shared->touch->product == 0x32C || wacom_wac->shared->touch->product == 0xF6) { input_dev->evbit[0] |= BIT_MASK(EV_SW); __set_bit(SW_MUTE_DEVICE, input_dev->swbit); wacom_wac->has_mute_touch_switch = true; wacom_wac->is_soft_touch_switch = true; } fallthrough; case MTSCREEN: case MTTPC: case MTTPC_B: case TABLETPC2FG: input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); fallthrough; case TABLETPC: case TABLETPCE: break; case INTUOSHT: case INTUOSHT2: input_dev->evbit[0] |= BIT_MASK(EV_SW); __set_bit(SW_MUTE_DEVICE, input_dev->swbit); fallthrough; case BAMBOO_PT: case BAMBOO_TOUCH: if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0); } input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); break; case BAMBOO_PAD: input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); break; } return 0; } static int wacom_numbered_button_to_key(int n) { if (n < 10) return BTN_0 + n; else if (n < 16) return BTN_A + (n-10); else if (n < 18) return BTN_BASE + (n-16); else return 0; } static void wacom_setup_numbered_buttons(struct input_dev *input_dev, int button_count) { int i; for (i = 0; i < button_count; i++) { int key = wacom_numbered_button_to_key(i); if (key) __set_bit(key, input_dev->keybit); } 
} static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group) { struct wacom_led *led; int i; bool updated = false; /* * 24HD has LED group 1 to the left and LED group 0 to the right. * So group 0 matches the second half of the buttons and thus the mask * needs to be shifted. */ if (group == 0) mask >>= 8; for (i = 0; i < 3; i++) { led = wacom_led_find(wacom, group, i); if (!led) { hid_err(wacom->hdev, "can't find LED %d in group %d\n", i, group); continue; } if (!updated && mask & BIT(i)) { led->held = true; led_trigger_event(&led->trigger, LED_FULL); } else { led->held = false; } } } static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, int mask, int group) { int group_button; /* * 21UX2 has LED group 1 to the left and LED group 0 * to the right. We need to reverse the group to match this * historical behavior. */ if (wacom->wacom_wac.features.type == WACOM_21UX2) group = 1 - group; group_button = group * (button_count/wacom->led.count); if (wacom->wacom_wac.features.type == INTUOSP2_BT) group_button = 8; return mask & (1 << group_button); } static void wacom_update_led(struct wacom *wacom, int button_count, int mask, int group) { struct wacom_led *led, *next_led; int cur; bool pressed; if (wacom->wacom_wac.features.type == WACOM_24HD) return wacom_24hd_update_leds(wacom, mask, group); pressed = wacom_is_led_toggled(wacom, button_count, mask, group); cur = wacom->led.groups[group].select; led = wacom_led_find(wacom, group, cur); if (!led) { hid_err(wacom->hdev, "can't find current LED %d in group %d\n", cur, group); return; } if (!pressed) { led->held = false; return; } if (led->held && pressed) return; next_led = wacom_led_next(wacom, led); if (!next_led) { hid_err(wacom->hdev, "can't find next LED in group %d\n", group); return; } if (next_led == led) return; next_led->held = true; led_trigger_event(&next_led->trigger, wacom_leds_brightness_get(next_led)); } static void wacom_report_numbered_buttons(struct input_dev *input_dev, int button_count, int mask) { struct wacom *wacom = input_get_drvdata(input_dev); int i; for (i = 0; i < wacom->led.count; i++) wacom_update_led(wacom, button_count, mask, i); for (i = 0; i < button_count; i++) { int key = wacom_numbered_button_to_key(i); if (key) input_report_key(input_dev, key, mask & (1 << i)); } } int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; if ((features->type == HID_GENERIC) && features->numbered_buttons > 0) features->device_type |= WACOM_DEVICETYPE_PAD; if (!(features->device_type & WACOM_DEVICETYPE_PAD)) return -ENODEV; if (features->type == REMOTE && input_dev == wacom_wac->pad_input) return -ENODEV; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); /* kept for making legacy xf86-input-wacom working with the wheels */ __set_bit(ABS_MISC, input_dev->absbit); /* kept for making legacy xf86-input-wacom accepting the pad */ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum || input_dev->absinfo[ABS_X].maximum))) input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0); if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum || input_dev->absinfo[ABS_Y].maximum))) input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0); /* kept for making udev and libwacom accepting the pad */ __set_bit(BTN_STYLUS, input_dev->keybit); wacom_setup_numbered_buttons(input_dev, features->numbered_buttons); switch (features->type) { case CINTIQ_HYBRID: case CINTIQ_COMPANION_2: case DTK: case DTUS: 
case GRAPHIRE_BT: break; case WACOM_MO: __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); break; case WACOM_G4: __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); input_set_capability(input_dev, EV_REL, REL_WHEEL); break; case WACOM_24HD: __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); __set_bit(KEY_ONSCREEN_KEYBOARD, input_dev->keybit); __set_bit(KEY_INFO, input_dev->keybit); if (!features->oPid) __set_bit(KEY_BUTTONCONFIG, input_dev->keybit); input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); break; case WACOM_27QHD: __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); __set_bit(KEY_ONSCREEN_KEYBOARD, input_dev->keybit); __set_bit(KEY_BUTTONCONFIG, input_dev->keybit); if (!features->oPid) __set_bit(KEY_CONTROLPANEL, input_dev->keybit); input_set_abs_params(input_dev, ABS_X, -2048, 2048, 0, 0); input_abs_set_res(input_dev, ABS_X, 1024); /* points/g */ input_set_abs_params(input_dev, ABS_Y, -2048, 2048, 0, 0); input_abs_set_res(input_dev, ABS_Y, 1024); input_set_abs_params(input_dev, ABS_Z, -2048, 2048, 0, 0); input_abs_set_res(input_dev, ABS_Z, 1024); __set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit); break; case WACOM_22HD: __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); __set_bit(KEY_BUTTONCONFIG, input_dev->keybit); __set_bit(KEY_INFO, input_dev->keybit); fallthrough; case WACOM_21UX2: case WACOM_BEE: case CINTIQ: input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); break; case WACOM_13HD: input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); break; case INTUOS3: case INTUOS3L: input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); fallthrough; case INTUOS3S: input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); break; case INTUOS5: case INTUOS5L: case INTUOSPM: case INTUOSPL: case INTUOS5S: case INTUOSPS: case INTUOSP2_BT: case INTUOSP2S_BT: input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); break; case INTUOS4WL: /* * For Bluetooth devices, the udev rule does not work correctly * for pads unless we add a stylus capability, which forces * ID_INPUT_TABLET to be set. 
*/ __set_bit(BTN_STYLUS, input_dev->keybit); fallthrough; case INTUOS4: case INTUOS4L: case INTUOS4S: input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); break; case INTUOSHT: case BAMBOO_PT: case BAMBOO_TOUCH: case INTUOSHT2: __clear_bit(ABS_MISC, input_dev->absbit); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); break; case REMOTE: input_set_capability(input_dev, EV_MSC, MSC_SERIAL); input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); break; case INTUOSHT3_BT: case HID_GENERIC: break; default: /* no pad supported */ return -ENODEV; } return 0; } static const struct wacom_features wacom_features_0x00 = { "Wacom Penpartner", 5040, 3780, 255, 0, PENPARTNER, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; static const struct wacom_features wacom_features_0x10 = { "Wacom Graphire", 10206, 7422, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x81 = { "Wacom Graphire BT", 16704, 12064, 511, 32, GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES, 2 }; static const struct wacom_features wacom_features_0x11 = { "Wacom Graphire2 4x5", 10206, 7422, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x12 = { "Wacom Graphire2 5x7", 13918, 10206, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x13 = { "Wacom Graphire3", 10208, 7424, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x14 = { "Wacom Graphire3 6x8", 16704, 12064, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x15 = { "Wacom Graphire4 4x5", 10208, 7424, 511, 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x16 = { "Wacom Graphire4 6x8", 16704, 12064, 511, 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x17 = { "Wacom BambooFun 4x5", 14760, 9225, 511, 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x18 = { "Wacom BambooFun 6x8", 21648, 13530, 511, 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x19 = { "Wacom Bamboo1 Medium", 16704, 12064, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x60 = { "Wacom Volito", 5104, 3712, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x61 = { "Wacom PenStation2", 3250, 2320, 255, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x62 = { "Wacom Volito2 4x5", 5104, 3712, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x63 = { "Wacom Volito2 2x3", 3248, 2320, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x64 = { "Wacom PenPartner2", 3250, 2320, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x65 = { "Wacom Bamboo", 14760, 9225, 511, 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x69 = { "Wacom Bamboo1", 5104, 3712, 511, 63, GRAPHIRE, WACOM_PENPRTN_RES, 
WACOM_PENPRTN_RES }; static const struct wacom_features wacom_features_0x6A = { "Wacom Bamboo1 4x6", 14760, 9225, 1023, 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x6B = { "Wacom Bamboo1 5x8", 21648, 13530, 1023, 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x20 = { "Wacom Intuos 4x5", 12700, 10600, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x21 = { "Wacom Intuos 6x8", 20320, 16240, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x22 = { "Wacom Intuos 9x12", 30480, 24060, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x23 = { "Wacom Intuos 12x12", 30480, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x24 = { "Wacom Intuos 12x18", 45720, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x30 = { "Wacom PL400", 5408, 4056, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x31 = { "Wacom PL500", 6144, 4608, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x32 = { "Wacom PL600", 6126, 4604, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x33 = { "Wacom PL600SX", 6260, 5016, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x34 = { "Wacom PL550", 6144, 4608, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x35 = { "Wacom PL800", 7220, 5780, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x37 = { "Wacom PL700", 6758, 5406, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x38 = { "Wacom PL510", 6282, 4762, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x39 = { "Wacom DTU710", 34080, 27660, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC4 = { "Wacom DTF521", 6282, 4762, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC0 = { "Wacom DTF720", 6858, 5506, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC2 = { "Wacom DTF720a", 6858, 5506, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x03 = { "Wacom Cintiq Partner", 20480, 15360, 511, 0, PTU, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x41 = { "Wacom Intuos2 4x5", 12700, 10600, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x42 = { "Wacom Intuos2 6x8", 20320, 16240, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x43 = { "Wacom Intuos2 9x12", 30480, 24060, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x44 = { "Wacom Intuos2 12x12", 30480, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x45 = { "Wacom Intuos2 12x18", 45720, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features 
wacom_features_0xB0 = { "Wacom Intuos3 4x5", 25400, 20320, 1023, 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 4 }; static const struct wacom_features wacom_features_0xB1 = { "Wacom Intuos3 6x8", 40640, 30480, 1023, 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 }; static const struct wacom_features wacom_features_0xB2 = { "Wacom Intuos3 9x12", 60960, 45720, 1023, 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 }; static const struct wacom_features wacom_features_0xB3 = { "Wacom Intuos3 12x12", 60960, 60960, 1023, 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 }; static const struct wacom_features wacom_features_0xB4 = { "Wacom Intuos3 12x19", 97536, 60960, 1023, 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 }; static const struct wacom_features wacom_features_0xB5 = { "Wacom Intuos3 6x11", 54204, 31750, 1023, 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 }; static const struct wacom_features wacom_features_0xB7 = { "Wacom Intuos3 4x6", 31496, 19685, 1023, 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 4 }; static const struct wacom_features wacom_features_0xB8 = { "Wacom Intuos4 4x6", 31496, 19685, 2047, 63, INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7 }; static const struct wacom_features wacom_features_0xB9 = { "Wacom Intuos4 6x9", 44704, 27940, 2047, 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 }; static const struct wacom_features wacom_features_0xBA = { "Wacom Intuos4 8x13", 65024, 40640, 2047, 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 }; static const struct wacom_features wacom_features_0xBB = { "Wacom Intuos4 12x19", 97536, 60960, 2047, 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 }; static const struct wacom_features wacom_features_0xBC = { "Wacom Intuos4 WL", 40640, 25400, 2047, 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 }; static const struct wacom_features wacom_features_0xBD = { "Wacom Intuos4 WL", 40640, 25400, 2047, 63, INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 }; static const struct wacom_features wacom_features_0x26 = { "Wacom Intuos5 touch S", 31496, 19685, 2047, 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 16 }; static const struct wacom_features wacom_features_0x27 = { "Wacom Intuos5 touch M", 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16 }; static const struct wacom_features wacom_features_0x28 = { "Wacom Intuos5 touch L", 65024, 40640, 2047, 63, INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16 }; static const struct wacom_features wacom_features_0x29 = { "Wacom Intuos5 S", 31496, 19685, 2047, 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7 }; static const struct wacom_features wacom_features_0x2A = { "Wacom Intuos5 M", 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 }; static const struct wacom_features wacom_features_0x314 = { "Wacom Intuos Pro S", 31496, 19685, 2047, 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x315 = { "Wacom Intuos Pro M", 44704, 27940, 2047, 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x317 = { "Wacom Intuos Pro L", 65024, 40640, 2047, 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; 
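/* Several of the display models below expose pen and touch on separate USB
 * interfaces; the .oVid/.oPid fields cross-reference the sibling device
 * (e.g. the 0xF8 pen half points at the 0xF6 touch half and vice versa) so
 * the driver can associate the two.
 */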
static const struct wacom_features wacom_features_0xF4 = { "Wacom Cintiq 24HD", 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0xF8 = { "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; static const struct wacom_features wacom_features_0xF6 = { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x32A = { "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x32B = { "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C }; static const struct wacom_features wacom_features_0x32C = { "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32B, .touch_max = 10 }; static const struct wacom_features wacom_features_0x3F = { "Wacom Cintiq 21UX", 87200, 65600, 1023, 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 }; static const struct wacom_features wacom_features_0xC5 = { "Wacom Cintiq 20WSX", 86680, 54180, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 }; static const struct wacom_features wacom_features_0xC6 = { "Wacom Cintiq 12WX", 53020, 33440, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 }; static const struct wacom_features wacom_features_0x304 = { "Wacom Cintiq 13HD", 59552, 33848, 1023, 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x333 = { "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 }; static const struct wacom_features wacom_features_0x335 = { "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x333, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0xC7 = { "Wacom DTU1931", 37832, 30305, 511, 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xCE = { "Wacom DTU2231", 47864, 27011, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .check_for_hid_type = true, .hid_type = HID_TYPE_USBMOUSE }; static const struct wacom_features wacom_features_0xF0 = { "Wacom DTU1631", 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xFB = { "Wacom DTU1031", 22096, 13960, 511, 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x32F = { "Wacom DTU1031X", 22672, 12928, 511, 
0, DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x336 = { "Wacom DTU1141", 23672, 13403, 1023, 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x57 = { "Wacom DTK2241", 95840, 54260, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x59 = /* Pen */ { "Wacom DTH2242", 95840, 54260, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D }; static const struct wacom_features wacom_features_0x5D = /* Touch */ { "Wacom DTH2242", .type = WACOM_24HDT, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0xCC = { "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0xFA = { "Wacom Cintiq 22HD", 95840, 54260, 2047, 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x5B = { "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; static const struct wacom_features wacom_features_0x5E = { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x90 = { "Wacom ISDv4 90", 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */ static const struct wacom_features wacom_features_0x93 = { "Wacom ISDv4 93", 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 }; static const struct wacom_features wacom_features_0x97 = { "Wacom ISDv4 97", 26202, 16325, 511, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */ static const struct wacom_features wacom_features_0x9A = { "Wacom ISDv4 9A", 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 }; static const struct wacom_features wacom_features_0x9F = { "Wacom ISDv4 9F", 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 }; static const struct wacom_features wacom_features_0xE2 = { "Wacom ISDv4 E2", 26202, 16325, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xE3 = { "Wacom ISDv4 E3", 26202, 16325, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xE5 = { "Wacom ISDv4 E5", 26202, 16325, 255, 0, MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xE6 = { "Wacom ISDv4 E6", 27760, 15694, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static 
const struct wacom_features wacom_features_0xEC = { "Wacom ISDv4 EC", 25710, 14500, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */ static const struct wacom_features wacom_features_0xED = { "Wacom ISDv4 ED", 26202, 16325, 255, 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 }; static const struct wacom_features wacom_features_0xEF = { "Wacom ISDv4 EF", 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */ static const struct wacom_features wacom_features_0x100 = { "Wacom ISDv4 100", 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x101 = { "Wacom ISDv4 101", 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x10D = { "Wacom ISDv4 10D", 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x10E = { "Wacom ISDv4 10E", 27760, 15694, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x10F = { "Wacom ISDv4 10F", 27760, 15694, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x116 = { "Wacom ISDv4 116", 26202, 16325, 255, 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 }; static const struct wacom_features wacom_features_0x12C = { "Wacom ISDv4 12C", 27848, 15752, 2047, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */ static const struct wacom_features wacom_features_0x4001 = { "Wacom ISDv4 4001", 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x4004 = { "Wacom ISDv4 4004", 11060, 6220, 255, 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x5000 = { "Wacom ISDv4 5000", 27848, 15752, 1023, 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x5002 = { "Wacom ISDv4 5002", 29576, 16724, 1023, 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x47 = { "Wacom Intuos2 6x8", 20320, 16240, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x84 = { "Wacom Wireless Receiver", .type = WIRELESS, .touch_max = 16 }; static const struct wacom_features wacom_features_0xD0 = { "Wacom Bamboo 2FG", 14720, 9200, 1023, 31, BAMBOO_TOUCH, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD1 = { "Wacom Bamboo 2FG 4x5", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD2 = { "Wacom Bamboo Craft", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD3 = { "Wacom Bamboo 2FG 6x8", 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD4 = { "Wacom Bamboo Pen", 14720, 9200, 1023, 31, BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD5 = { "Wacom Bamboo Pen 6x8", 21648, 13700, 1023, 31, BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD6 = { "Wacom BambooPT 2FG 4x5", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, 
WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD7 = { "Wacom BambooPT 2FG Small", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD8 = { "Wacom Bamboo Comic 2FG", 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xDA = { "Wacom Bamboo 2FG 4x5 SE", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xDB = { "Wacom Bamboo 2FG 6x8 SE", 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xDD = { "Wacom Bamboo Connect", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xDE = { "Wacom Bamboo 16FG 4x5", 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0xDF = { "Wacom Bamboo 16FG 6x8", 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0x300 = { "Wacom Bamboo One S", 14720, 9225, 1023, 31, BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x301 = { "Wacom Bamboo One M", 21648, 13530, 1023, 31, BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x302 = { "Wacom Intuos PT S", 15200, 9500, 1023, 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x303 = { "Wacom Intuos PT M", 21600, 13500, 1023, 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x30E = { "Wacom Intuos S", 15200, 9500, 1023, 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x6004 = { "ISD-V4", 12800, 8000, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x307 = { "Wacom ISDv5 307", 59552, 33848, 2047, 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 }; static const struct wacom_features wacom_features_0x309 = { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x30A = { "Wacom ISDv5 30A", 59552, 33848, 2047, 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C }; static const struct wacom_features wacom_features_0x30C = { "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30A, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x318 = { "Wacom USB Bamboo PAD", 4095, 4095, /* Touch */ .type = BAMBOO_PAD, 35, 48, .touch_max = 4 }; static 
const struct wacom_features wacom_features_0x319 = { "Wacom Wireless Bamboo PAD", 4095, 4095, /* Touch */ .type = BAMBOO_PAD, 35, 48, .touch_max = 4 }; static const struct wacom_features wacom_features_0x325 = { "Wacom ISDv5 325", 59552, 33848, 2047, 63, CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 }; static const struct wacom_features wacom_features_0x326 = /* Touch */ { "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x325 }; static const struct wacom_features wacom_features_0x323 = { "Wacom Intuos P M", 21600, 13500, 1023, 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x331 = { "Wacom Express Key Remote", .type = REMOTE, .numbered_buttons = 18, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x33B = { "Wacom Intuos S 2", 15200, 9500, 2047, 63, INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x33C = { "Wacom Intuos PT S 2", 15200, 9500, 2047, 63, INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x33D = { "Wacom Intuos P M 2", 21600, 13500, 2047, 63, INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x33E = { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63, INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x343 = { "Wacom DTK1651", 34816, 19759, 1023, 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x360 = { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; static const struct wacom_features wacom_features_0x361 = { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; static const struct wacom_features wacom_features_0x377 = { "Wacom Intuos BT S", 15200, 9500, 4095, 63, INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; static const struct wacom_features wacom_features_0x379 = { "Wacom Intuos BT M", 21600, 13500, 4095, 63, INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; static const struct wacom_features wacom_features_0x37A = { "Wacom One by Wacom S", 15200, 9500, 2047, 63, BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x37B = { "Wacom One by Wacom M", 21600, 13500, 2047, 63, BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x393 = { "Wacom Intuos Pro S", 31920, 19950, 8191, 63, INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 10 }; static const struct wacom_features wacom_features_0x3c6 = { "Wacom Intuos BT S", 15200, 9500, 4095, 63, INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; static const struct wacom_features wacom_features_0x3c8 = { "Wacom Intuos BT M", 21600, 13500, 4095, 63, 
INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; static const struct wacom_features wacom_features_0x3dd = { "Wacom Intuos Pro S", 31920, 19950, 8191, 63, INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 10 }; static const struct wacom_features wacom_features_HID_ANY_ID = { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; static const struct wacom_features wacom_features_0x94 = { "Wacom Bootloader", .type = BOOTLOADER }; #define USB_DEVICE_WACOM(prod) \ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod #define BT_DEVICE_WACOM(prod) \ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod #define I2C_DEVICE_WACOM(prod) \ HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod #define PCI_DEVICE_WACOM(prod) \ HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod #define USB_DEVICE_LENOVO(prod) \ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ .driver_data = (kernel_ulong_t)&wacom_features_##prod const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x00) }, { USB_DEVICE_WACOM(0x03) }, { USB_DEVICE_WACOM(0x10) }, { USB_DEVICE_WACOM(0x11) }, { USB_DEVICE_WACOM(0x12) }, { USB_DEVICE_WACOM(0x13) }, { USB_DEVICE_WACOM(0x14) }, { USB_DEVICE_WACOM(0x15) }, { USB_DEVICE_WACOM(0x16) }, { USB_DEVICE_WACOM(0x17) }, { USB_DEVICE_WACOM(0x18) }, { USB_DEVICE_WACOM(0x19) }, { USB_DEVICE_WACOM(0x20) }, { USB_DEVICE_WACOM(0x21) }, { USB_DEVICE_WACOM(0x22) }, { USB_DEVICE_WACOM(0x23) }, { USB_DEVICE_WACOM(0x24) }, { USB_DEVICE_WACOM(0x26) }, { USB_DEVICE_WACOM(0x27) }, { USB_DEVICE_WACOM(0x28) }, { USB_DEVICE_WACOM(0x29) }, { USB_DEVICE_WACOM(0x2A) }, { USB_DEVICE_WACOM(0x30) }, { USB_DEVICE_WACOM(0x31) }, { USB_DEVICE_WACOM(0x32) }, { USB_DEVICE_WACOM(0x33) }, { USB_DEVICE_WACOM(0x34) }, { USB_DEVICE_WACOM(0x35) }, { USB_DEVICE_WACOM(0x37) }, { USB_DEVICE_WACOM(0x38) }, { USB_DEVICE_WACOM(0x39) }, { USB_DEVICE_WACOM(0x3F) }, { USB_DEVICE_WACOM(0x41) }, { USB_DEVICE_WACOM(0x42) }, { USB_DEVICE_WACOM(0x43) }, { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0x57) }, { USB_DEVICE_WACOM(0x59) }, { USB_DEVICE_WACOM(0x5B) }, { USB_DEVICE_WACOM(0x5D) }, { USB_DEVICE_WACOM(0x5E) }, { USB_DEVICE_WACOM(0x60) }, { USB_DEVICE_WACOM(0x61) }, { USB_DEVICE_WACOM(0x62) }, { USB_DEVICE_WACOM(0x63) }, { USB_DEVICE_WACOM(0x64) }, { USB_DEVICE_WACOM(0x65) }, { USB_DEVICE_WACOM(0x69) }, { USB_DEVICE_WACOM(0x6A) }, { USB_DEVICE_WACOM(0x6B) }, { BT_DEVICE_WACOM(0x81) }, { USB_DEVICE_WACOM(0x84) }, { USB_DEVICE_WACOM(0x90) }, { USB_DEVICE_WACOM(0x93) }, { USB_DEVICE_WACOM(0x94) }, { USB_DEVICE_WACOM(0x97) }, { USB_DEVICE_WACOM(0x9A) }, { USB_DEVICE_WACOM(0x9F) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, { USB_DEVICE_WACOM(0xB3) }, { USB_DEVICE_WACOM(0xB4) }, { USB_DEVICE_WACOM(0xB5) }, { USB_DEVICE_WACOM(0xB7) }, { USB_DEVICE_WACOM(0xB8) }, { USB_DEVICE_WACOM(0xB9) }, { USB_DEVICE_WACOM(0xBA) }, { USB_DEVICE_WACOM(0xBB) }, { USB_DEVICE_WACOM(0xBC) }, { BT_DEVICE_WACOM(0xBD) }, { USB_DEVICE_WACOM(0xC0) }, { USB_DEVICE_WACOM(0xC2) }, { USB_DEVICE_WACOM(0xC4) }, { USB_DEVICE_WACOM(0xC5) }, { USB_DEVICE_WACOM(0xC6) }, { USB_DEVICE_WACOM(0xC7) }, { USB_DEVICE_WACOM(0xCC) }, { USB_DEVICE_WACOM(0xCE) }, { 
USB_DEVICE_WACOM(0xD0) },
	{ USB_DEVICE_WACOM(0xD1) },
	{ USB_DEVICE_WACOM(0xD2) },
	{ USB_DEVICE_WACOM(0xD3) },
	{ USB_DEVICE_WACOM(0xD4) },
	{ USB_DEVICE_WACOM(0xD5) },
	{ USB_DEVICE_WACOM(0xD6) },
	{ USB_DEVICE_WACOM(0xD7) },
	{ USB_DEVICE_WACOM(0xD8) },
	{ USB_DEVICE_WACOM(0xDA) },
	{ USB_DEVICE_WACOM(0xDB) },
	{ USB_DEVICE_WACOM(0xDD) },
	{ USB_DEVICE_WACOM(0xDE) },
	{ USB_DEVICE_WACOM(0xDF) },
	{ USB_DEVICE_WACOM(0xE2) },
	{ USB_DEVICE_WACOM(0xE3) },
	{ USB_DEVICE_WACOM(0xE5) },
	{ USB_DEVICE_WACOM(0xE6) },
	{ USB_DEVICE_WACOM(0xEC) },
	{ USB_DEVICE_WACOM(0xED) },
	{ USB_DEVICE_WACOM(0xEF) },
	{ USB_DEVICE_WACOM(0xF0) },
	{ USB_DEVICE_WACOM(0xF4) },
	{ USB_DEVICE_WACOM(0xF6) },
	{ USB_DEVICE_WACOM(0xF8) },
	{ USB_DEVICE_WACOM(0xFA) },
	{ USB_DEVICE_WACOM(0xFB) },
	{ USB_DEVICE_WACOM(0x100) },
	{ USB_DEVICE_WACOM(0x101) },
	{ USB_DEVICE_WACOM(0x10D) },
	{ USB_DEVICE_WACOM(0x10E) },
	{ USB_DEVICE_WACOM(0x10F) },
	{ USB_DEVICE_WACOM(0x116) },
	{ USB_DEVICE_WACOM(0x12C) },
	{ USB_DEVICE_WACOM(0x300) },
	{ USB_DEVICE_WACOM(0x301) },
	{ USB_DEVICE_WACOM(0x302) },
	{ USB_DEVICE_WACOM(0x303) },
	{ USB_DEVICE_WACOM(0x304) },
	{ USB_DEVICE_WACOM(0x307) },
	{ USB_DEVICE_WACOM(0x309) },
	{ USB_DEVICE_WACOM(0x30A) },
	{ USB_DEVICE_WACOM(0x30C) },
	{ USB_DEVICE_WACOM(0x30E) },
	{ USB_DEVICE_WACOM(0x314) },
	{ USB_DEVICE_WACOM(0x315) },
	{ USB_DEVICE_WACOM(0x317) },
	{ USB_DEVICE_WACOM(0x318) },
	{ USB_DEVICE_WACOM(0x319) },
	{ USB_DEVICE_WACOM(0x323) },
	{ USB_DEVICE_WACOM(0x325) },
	{ USB_DEVICE_WACOM(0x326) },
	{ USB_DEVICE_WACOM(0x32A) },
	{ USB_DEVICE_WACOM(0x32B) },
	{ USB_DEVICE_WACOM(0x32C) },
	{ USB_DEVICE_WACOM(0x32F) },
	{ USB_DEVICE_WACOM(0x331) },
	{ USB_DEVICE_WACOM(0x333) },
	{ USB_DEVICE_WACOM(0x335) },
	{ USB_DEVICE_WACOM(0x336) },
	{ USB_DEVICE_WACOM(0x33B) },
	{ USB_DEVICE_WACOM(0x33C) },
	{ USB_DEVICE_WACOM(0x33D) },
	{ USB_DEVICE_WACOM(0x33E) },
	{ USB_DEVICE_WACOM(0x343) },
	{ BT_DEVICE_WACOM(0x360) },
	{ BT_DEVICE_WACOM(0x361) },
	{ BT_DEVICE_WACOM(0x377) },
	{ BT_DEVICE_WACOM(0x379) },
	{ USB_DEVICE_WACOM(0x37A) },
	{ USB_DEVICE_WACOM(0x37B) },
	{ BT_DEVICE_WACOM(0x393) },
	{ BT_DEVICE_WACOM(0x3c6) },
	{ BT_DEVICE_WACOM(0x3c8) },
	{ BT_DEVICE_WACOM(0x3dd) },
	{ USB_DEVICE_WACOM(0x4001) },
	{ USB_DEVICE_WACOM(0x4004) },
	{ USB_DEVICE_WACOM(0x5000) },
	{ USB_DEVICE_WACOM(0x5002) },
	{ USB_DEVICE_LENOVO(0x6004) },
	{ USB_DEVICE_WACOM(HID_ANY_ID) },
	{ I2C_DEVICE_WACOM(HID_ANY_ID) },
	{ PCI_DEVICE_WACOM(HID_ANY_ID) },
	{ BT_DEVICE_WACOM(HID_ANY_ID) },
	{ }
};
MODULE_DEVICE_TABLE(hid, wacom_ids);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ASIX AX88172A based USB 2.0 Ethernet Devices
 * Copyright (C) 2012 OMICRON electronics GmbH
 *
 * Supports external PHYs via phylib. Based on the driver for the
 * AX88772. Original copyrights follow:
 *
 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
 * Copyright (c) 2002-2003 TiVo Inc.
*/ #include "asix.h" #include <linux/phy.h> struct ax88172a_private { struct mii_bus *mdio; struct phy_device *phydev; char phy_name[PHY_ID_SIZE]; u8 phy_addr; u16 oldmode; int use_embdphy; struct asix_rx_fixup_info rx_fixup_info; }; /* set MAC link settings according to information from phylib */ static void ax88172a_adjust_link(struct net_device *netdev) { struct phy_device *phydev = netdev->phydev; struct usbnet *dev = netdev_priv(netdev); struct ax88172a_private *priv = dev->driver_priv; u16 mode = 0; if (phydev->link) { mode = AX88772_MEDIUM_DEFAULT; if (phydev->duplex == DUPLEX_HALF) mode &= ~AX_MEDIUM_FD; if (phydev->speed != SPEED_100) mode &= ~AX_MEDIUM_PS; } if (mode != priv->oldmode) { asix_write_medium_mode(dev, mode, 0); priv->oldmode = mode; netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n", phydev->speed, phydev->duplex, mode); phy_print_status(phydev); } } static void ax88172a_status(struct usbnet *dev, struct urb *urb) { /* link changes are detected by polling the phy */ } /* use phylib infrastructure */ static int ax88172a_init_mdio(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; int ret; priv->mdio = mdiobus_alloc(); if (!priv->mdio) { netdev_err(dev->net, "Could not allocate MDIO bus\n"); return -ENOMEM; } priv->mdio->priv = (void *)dev; priv->mdio->read = &asix_mdio_bus_read; priv->mdio->write = &asix_mdio_bus_write; priv->mdio->name = "Asix MDIO Bus"; /* mii bus name is usb-<usb bus number>-<usb device number> */ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); ret = mdiobus_register(priv->mdio); if (ret) { netdev_err(dev->net, "Could not register MDIO bus\n"); goto mfree; } netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id); return 0; mfree: mdiobus_free(priv->mdio); return ret; } static void ax88172a_remove_mdio(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id); mdiobus_unregister(priv->mdio); mdiobus_free(priv->mdio); } static const struct net_device_ops ax88172a_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = asix_set_multicast, }; static const struct ethtool_ops ax88172a_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = usbnet_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_wol = asix_get_wol, .set_wol = asix_set_wol, .get_eeprom_len = asix_get_eeprom_len, .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, .nway_reset = phy_ethtool_nway_reset, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy) { int ret; ret = asix_sw_reset(dev, AX_SWRESET_IPPD, 0); if (ret < 0) goto err; msleep(150); ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, 0); if (ret < 0) goto err; msleep(150); ret = asix_sw_reset(dev, embd_phy ? 
AX_SWRESET_IPRL : AX_SWRESET_IPPD, 0); if (ret < 0) goto err; return 0; err: return ret; } static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) { int ret; u8 buf[ETH_ALEN]; struct ax88172a_private *priv; ret = usbnet_get_endpoints(dev, intf); if (ret) return ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev->driver_priv = priv; /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); ret = -EIO; goto free; } eth_hw_addr_set(dev->net, buf); dev->net->netdev_ops = &ax88172a_netdev_ops; dev->net->ethtool_ops = &ax88172a_ethtool_ops; /* are we using the internal or the external phy? */ ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf, 0); if (ret < 0) { netdev_err(dev->net, "Failed to read software interface selection register: %d\n", ret); goto free; } netdev_dbg(dev->net, "AX_CMD_SW_PHY_STATUS = 0x%02x\n", buf[0]); switch (buf[0] & AX_PHY_SELECT_MASK) { case AX_PHY_SELECT_INTERNAL: netdev_dbg(dev->net, "use internal phy\n"); priv->use_embdphy = 1; break; case AX_PHY_SELECT_EXTERNAL: netdev_dbg(dev->net, "use external phy\n"); priv->use_embdphy = 0; break; default: netdev_err(dev->net, "Interface mode not supported by driver\n"); ret = -ENOTSUPP; goto free; } ret = asix_read_phy_addr(dev, priv->use_embdphy); if (ret < 0) goto free; priv->phy_addr = ret; ax88172a_reset_phy(dev, priv->use_embdphy); /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { /* hard_mtu is still the default - the device does not support jumbo eth frames */ dev->rx_urb_size = 2048; } /* init MDIO bus */ ret = ax88172a_init_mdio(dev); if (ret) goto free; return 0; free: kfree(priv); return ret; } static int ax88172a_stop(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; netdev_dbg(dev->net, "Stopping interface\n"); if (priv->phydev) { netdev_info(dev->net, "Disconnecting from phy %s\n", priv->phy_name); phy_stop(priv->phydev); phy_disconnect(priv->phydev); } return 0; } static void ax88172a_unbind(struct usbnet *dev, struct usb_interface *intf) { struct ax88172a_private *priv = dev->driver_priv; ax88172a_remove_mdio(dev); kfree(priv); } static int ax88172a_reset(struct usbnet *dev) { struct asix_data *data = (struct asix_data *)&dev->data; struct ax88172a_private *priv = dev->driver_priv; int ret; u16 rx_ctl; ax88172a_reset_phy(dev, priv->use_embdphy); msleep(150); rx_ctl = asix_read_rx_ctl(dev, 0); netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl); ret = asix_write_rx_ctl(dev, 0x0000, 0); if (ret < 0) goto out; rx_ctl = asix_read_rx_ctl(dev, 0); netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl); msleep(150); ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0, AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, AX88772_IPG2_DEFAULT, 0, NULL, 0); if (ret < 0) { netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret); goto out; } /* Rewrite MAC address */ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN); ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN, data->mac_addr, 0); if (ret < 0) goto out; /* Set RX_CTL to default values with 2k buffer, and enable cactus */ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0); if (ret < 0) goto out; rx_ctl = asix_read_rx_ctl(dev, 0); netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n", rx_ctl); rx_ctl = asix_read_medium_status(dev, 0); 
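	/* MAC-side register setup is complete at this point; the remainder of
	 * ax88172a_reset() logs the resulting medium status and attaches the
	 * phylib PHY found on the MDIO bus registered in ax88172a_bind().
	 */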
netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n", rx_ctl); /* Connect to PHY */ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr); priv->phydev = phy_connect(dev->net, priv->phy_name, &ax88172a_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phydev)) { netdev_err(dev->net, "Could not connect to PHY device %s\n", priv->phy_name); ret = PTR_ERR(priv->phydev); goto out; } netdev_info(dev->net, "Connected to phy %s\n", priv->phy_name); /* During power-up, the AX88172A set the power down (BMCR_PDOWN) * bit of the PHY. Bring the PHY up again. */ genphy_resume(priv->phydev); phy_start(priv->phydev); return 0; out: return ret; } static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct ax88172a_private *dp = dev->driver_priv; struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; return asix_rx_fixup_internal(dev, skb, rx); } const struct driver_info ax88172a_info = { .description = "ASIX AX88172A USB 2.0 Ethernet", .bind = ax88172a_bind, .reset = ax88172a_reset, .stop = ax88172a_stop, .unbind = ax88172a_unbind, .status = ax88172a_status, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = ax88172a_rx_fixup, .tx_fixup = asix_tx_fixup, }; |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables...
*/ static struct files_stat_struct files_stat = { .max_files = NR_FILE }; /* SLAB cache for file structures */ static struct kmem_cache *filp_cachep __ro_after_init; static struct kmem_cache *bfilp_cachep __ro_after_init; static struct percpu_counter nr_files __cacheline_aligned_in_smp; /* Container for backing file with optional user path */ struct backing_file { struct file file; union { struct path user_path; freeptr_t bf_freeptr; }; }; #define backing_file(f) container_of(f, struct backing_file, file) const struct path *backing_file_user_path(const struct file *f) { return &backing_file(f)->user_path; } EXPORT_SYMBOL_GPL(backing_file_user_path); void backing_file_set_user_path(struct file *f, const struct path *path) { backing_file(f)->user_path = *path; } EXPORT_SYMBOL_GPL(backing_file_set_user_path); static inline void file_free(struct file *f) { security_file_free(f); if (likely(!(f->f_mode & FMODE_NOACCOUNT))) percpu_counter_dec(&nr_files); put_cred(f->f_cred); if (unlikely(f->f_mode & FMODE_BACKING)) { path_put(backing_file_user_path(f)); kmem_cache_free(bfilp_cachep, backing_file(f)); } else { kmem_cache_free(filp_cachep, f); } } /* * Return the total number of open files in the system */ static long get_nr_files(void) { return percpu_counter_read_positive(&nr_files); } /* * Return the maximum number of open files in the system */ unsigned long get_max_files(void) { return files_stat.max_files; } EXPORT_SYMBOL_GPL(get_max_files); #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) /* * Handle nr_files sysctl */ static int proc_nr_files(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = percpu_counter_sum_positive(&nr_files); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } static const struct ctl_table fs_stat_sysctls[] = { { .procname = "file-nr", .data = &files_stat, .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = SYSCTL_LONG_ZERO, .extra2 = SYSCTL_LONG_MAX, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_douintvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, }; static int __init init_fs_stat_sysctls(void) { register_sysctl_init("fs", fs_stat_sysctls); if (IS_ENABLED(CONFIG_BINFMT_MISC)) { struct ctl_table_header *hdr; hdr = register_sysctl_mount_point("fs/binfmt_misc"); kmemleak_not_leak(hdr); } return 0; } fs_initcall(init_fs_stat_sysctls); #endif static int init_file(struct file *f, int flags, const struct cred *cred) { int error; f->f_cred = get_cred(cred); error = security_file_alloc(f); if (unlikely(error)) { put_cred(f->f_cred); return error; } spin_lock_init(&f->f_lock); /* * Note that f_pos_lock is only used for files raising * FMODE_ATOMIC_POS and directories. Other files such as pipes * don't need it and since f_pos_lock is in a union may reuse * the space for other purposes. They are expected to initialize * the respective member when opening the file. 
*/ mutex_init(&f->f_pos_lock); memset(&f->__f_path, 0, sizeof(f->f_path)); memset(&f->f_ra, 0, sizeof(f->f_ra)); f->f_flags = flags; f->f_mode = OPEN_FMODE(flags); f->f_op = NULL; f->f_mapping = NULL; f->private_data = NULL; f->f_inode = NULL; f->f_owner = NULL; #ifdef CONFIG_EPOLL f->f_ep = NULL; #endif f->f_iocb_flags = 0; f->f_pos = 0; f->f_wb_err = 0; f->f_sb_err = 0; /* * We're SLAB_TYPESAFE_BY_RCU so initialize f_ref last. While * fget-rcu pattern users need to be able to handle spurious * refcount bumps we should reinitialize the reused file first. */ file_ref_init(&f->f_ref, 1); /* * Disable permission and pre-content events for all files by default. * They may be enabled later by fsnotify_open_perm_and_set_mode(). */ file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM); return 0; } /* Find an unused file structure and return a pointer to it. * Returns an error pointer if some error happend e.g. we over file * structures limit, run out of memory or operation is not permitted. * * Be very careful using this. You are responsible for * getting write access to any mount that you might assign * to this filp, if it is opened for write. If this is not * done, you will imbalance int the mount's writer count * and a warning at __fput() time. */ struct file *alloc_empty_file(int flags, const struct cred *cred) { static long old_max; struct file *f; int error; /* * Privileged users can go above max_files */ if (unlikely(get_nr_files() >= files_stat.max_files) && !capable(CAP_SYS_ADMIN)) { /* * percpu_counters are inaccurate. Do an expensive check before * we go and fail. */ if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files) goto over; } f = kmem_cache_alloc(filp_cachep, GFP_KERNEL); if (unlikely(!f)) return ERR_PTR(-ENOMEM); error = init_file(f, flags, cred); if (unlikely(error)) { kmem_cache_free(filp_cachep, f); return ERR_PTR(error); } percpu_counter_inc(&nr_files); return f; over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } return ERR_PTR(-ENFILE); } /* * Variant of alloc_empty_file() that doesn't check and modify nr_files. * * This is only for kernel internal use, and the allocate file must not be * installed into file tables or such. */ struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred) { struct file *f; int error; f = kmem_cache_alloc(filp_cachep, GFP_KERNEL); if (unlikely(!f)) return ERR_PTR(-ENOMEM); error = init_file(f, flags, cred); if (unlikely(error)) { kmem_cache_free(filp_cachep, f); return ERR_PTR(error); } f->f_mode |= FMODE_NOACCOUNT; return f; } /* * Variant of alloc_empty_file() that allocates a backing_file container * and doesn't check and modify nr_files. * * This is only for kernel internal use, and the allocate file must not be * installed into file tables or such. 
*/ struct file *alloc_empty_backing_file(int flags, const struct cred *cred) { struct backing_file *ff; int error; ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL); if (unlikely(!ff)) return ERR_PTR(-ENOMEM); error = init_file(&ff->file, flags, cred); if (unlikely(error)) { kmem_cache_free(bfilp_cachep, ff); return ERR_PTR(error); } ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT; return &ff->file; } /** * file_init_path - initialize a 'struct file' based on path * * @file: the file to set up * @path: the (dentry, vfsmount) pair for the new file * @fop: the 'struct file_operations' for the new file */ static void file_init_path(struct file *file, const struct path *path, const struct file_operations *fop) { file->__f_path = *path; file->f_inode = path->dentry->d_inode; file->f_mapping = path->dentry->d_inode->i_mapping; file->f_wb_err = filemap_sample_wb_err(file->f_mapping); file->f_sb_err = file_sample_sb_err(file); if (fop->llseek) file->f_mode |= FMODE_LSEEK; if ((file->f_mode & FMODE_READ) && likely(fop->read || fop->read_iter)) file->f_mode |= FMODE_CAN_READ; if ((file->f_mode & FMODE_WRITE) && likely(fop->write || fop->write_iter)) file->f_mode |= FMODE_CAN_WRITE; file->f_iocb_flags = iocb_flags(file); file->f_mode |= FMODE_OPENED; file->f_op = fop; if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(path->dentry->d_inode); } /** * alloc_file - allocate and initialize a 'struct file' * * @path: the (dentry, vfsmount) pair for the new file * @flags: O_... flags with which the new file will be opened * @fop: the 'struct file_operations' for the new file */ static struct file *alloc_file(const struct path *path, int flags, const struct file_operations *fop) { struct file *file; file = alloc_empty_file(flags, current_cred()); if (!IS_ERR(file)) file_init_path(file, path, fop); return file; } static inline int alloc_path_pseudo(const char *name, struct inode *inode, struct vfsmount *mnt, struct path *path) { path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name)); if (!path->dentry) return -ENOMEM; path->mnt = mntget(mnt); d_instantiate(path->dentry, inode); return 0; } struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt, const char *name, int flags, const struct file_operations *fops) { int ret; struct path path; struct file *file; ret = alloc_path_pseudo(name, inode, mnt, &path); if (ret) return ERR_PTR(ret); file = alloc_file(&path, flags, fops); if (IS_ERR(file)) { ihold(inode); path_put(&path); return file; } /* * Disable all fsnotify events for pseudo files by default. * They may be enabled by caller with file_set_fsnotify_mode(). */ file_set_fsnotify_mode(file, FMODE_NONOTIFY); return file; } EXPORT_SYMBOL(alloc_file_pseudo); struct file *alloc_file_pseudo_noaccount(struct inode *inode, struct vfsmount *mnt, const char *name, int flags, const struct file_operations *fops) { int ret; struct path path; struct file *file; ret = alloc_path_pseudo(name, inode, mnt, &path); if (ret) return ERR_PTR(ret); file = alloc_empty_file_noaccount(flags, current_cred()); if (IS_ERR(file)) { ihold(inode); path_put(&path); return file; } file_init_path(file, &path, fops); /* * Disable all fsnotify events for pseudo files by default. * They may be enabled by caller with file_set_fsnotify_mode(). 
*/ file_set_fsnotify_mode(file, FMODE_NONOTIFY); return file; } EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount); struct file *alloc_file_clone(struct file *base, int flags, const struct file_operations *fops) { struct file *f; f = alloc_file(&base->f_path, flags, fops); if (!IS_ERR(f)) { path_get(&f->f_path); f->f_mapping = base->f_mapping; } return f; } /* the real guts of fput() - releasing the last reference to file */ static void __fput(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct vfsmount *mnt = file->f_path.mnt; struct inode *inode = file->f_inode; fmode_t mode = file->f_mode; if (unlikely(!(file->f_mode & FMODE_OPENED))) goto out; might_sleep(); fsnotify_close(file); /* * The function eventpoll_release() should be the first called * in the file cleanup chain. */ eventpoll_release(file); locks_remove_file(file); security_file_release(file); if (unlikely(file->f_flags & FASYNC)) { if (file->f_op->fasync) file->f_op->fasync(-1, file, 0); } if (file->f_op->release) file->f_op->release(inode, file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL && !(mode & FMODE_PATH))) { cdev_put(inode->i_cdev); } fops_put(file->f_op); file_f_owner_release(file); put_file_access(file); dput(dentry); if (unlikely(mode & FMODE_NEED_UNMOUNT)) dissolve_on_fput(mnt); mntput(mnt); out: file_free(file); } static LLIST_HEAD(delayed_fput_list); static void delayed_fput(struct work_struct *unused) { struct llist_node *node = llist_del_all(&delayed_fput_list); struct file *f, *t; llist_for_each_entry_safe(f, t, node, f_llist) __fput(f); } static void ____fput(struct callback_head *work) { __fput(container_of(work, struct file, f_task_work)); } static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); /* * If kernel thread really needs to have the final fput() it has done * to complete, call this. The only user right now is the boot - we * *do* need to make sure our writes to binaries on initramfs has * not left us with opened struct file waiting for __fput() - execve() * won't work without that. Please, don't add more callers without * very good reasons; in particular, never call that with locks * held and never call that from a thread that might need to do * some work on any kind of umount. */ void flush_delayed_fput(void) { delayed_fput(NULL); flush_delayed_work(&delayed_fput_work); } EXPORT_SYMBOL_GPL(flush_delayed_fput); static void __fput_deferred(struct file *file) { struct task_struct *task = current; if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) { file_free(file); return; } if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { init_task_work(&file->f_task_work, ____fput); if (!task_work_add(task, &file->f_task_work, TWA_RESUME)) return; /* * After this task has run exit_task_work(), * task_work_add() will fail. Fall through to delayed * fput to avoid leaking *file. */ } if (llist_add(&file->f_llist, &delayed_fput_list)) schedule_delayed_work(&delayed_fput_work, 1); } void fput(struct file *file) { if (unlikely(file_ref_put(&file->f_ref))) __fput_deferred(file); } EXPORT_SYMBOL(fput); /* * synchronous analog of fput(); for kernel threads that might be needed * in some umount() (and thus can't use flush_delayed_fput() without * risking deadlocks), need to wait for completion of __fput() and know * for this specific struct file it won't involve anything that would * need them. Use only if you really need it - at the very least, * don't blindly convert fput() by kernel thread to that. 
*/ void __fput_sync(struct file *file) { if (file_ref_put(&file->f_ref)) __fput(file); } EXPORT_SYMBOL(__fput_sync); /* * Equivalent to __fput_sync(), but optimized for being called with the last * reference. * * See file_ref_put_close() for details. */ void fput_close_sync(struct file *file) { if (likely(file_ref_put_close(&file->f_ref))) __fput(file); } /* * Equivalent to fput(), but optimized for being called with the last * reference. * * See file_ref_put_close() for details. */ void fput_close(struct file *file) { if (file_ref_put_close(&file->f_ref)) __fput_deferred(file); } void __init files_init(void) { struct kmem_cache_args args = { .use_freeptr_offset = true, .freeptr_offset = offsetof(struct file, f_freeptr), }; filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args, SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU); args.freeptr_offset = offsetof(struct backing_file, bf_freeptr); bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file), &args, SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU); percpu_counter_init(&nr_files, 0, GFP_KERNEL); } /* * One file with associated inode and dcache is very roughly 1K. Per default * do not use more than 10% of our memory for files. */ void __init files_maxfiles_init(void) { unsigned long n; unsigned long nr_pages = totalram_pages(); unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2; memreserve = min(memreserve, nr_pages - 1); n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); } |
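files_maxfiles_init() above caps the default file-max at roughly 10% of memory, at about 1K per open file, after reserving one and a half times the memory already in use at boot. The standalone sketch below reruns that arithmetic with made-up inputs (4 GiB of RAM, 4 KiB pages, 256 MiB used at init time); the NR_FILE floor of 8192 is assumed from <linux/fs.h>.

/*
 * Standalone sketch of the files_maxfiles_init() arithmetic, with
 * hypothetical inputs: 4 GiB of RAM, 4 KiB pages, 256 MiB used at boot.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_FILE   8192UL        /* assumed default floor from <linux/fs.h> */

int main(void)
{
        unsigned long nr_pages = (4UL << 30) / PAGE_SIZE;                /* 4 GiB */
        unsigned long nr_free  = nr_pages - ((256UL << 20) / PAGE_SIZE); /* 256 MiB in use */
        unsigned long memreserve = (nr_pages - nr_free) * 3 / 2;
        unsigned long n;

        if (memreserve > nr_pages - 1)
                memreserve = nr_pages - 1;
        /* 10% of the remaining memory, at ~1K per open file */
        n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
        printf("max_files = %lu\n", n > NR_FILE ? n : NR_FILE);
        return 0;
}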
| 24 23 23 23 23 31 31 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/time.h> #include <linux/gcd.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/timer.h> #include "pcm_local.h" /* * Timer functions */ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream) { unsigned long rate, mult, fsize, l, post; struct snd_pcm_runtime *runtime = substream->runtime; mult = 1000000000; rate = runtime->rate; if (snd_BUG_ON(!rate)) return; l = gcd(mult, rate); mult /= l; rate /= l; fsize = runtime->period_size; if (snd_BUG_ON(!fsize)) return; l = gcd(rate, fsize); rate /= l; fsize /= l; post = 1; while ((mult * fsize) / fsize != mult) { mult /= 2; post *= 2; } if (rate == 0) { pcm_err(substream->pcm, "pcm timer resolution out of range (rate = %u, period_size = %lu)\n", runtime->rate, runtime->period_size); runtime->timer_resolution = -1; return; } runtime->timer_resolution = (mult * fsize / rate) * post; } static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = timer->private_data; return substream->runtime ? substream->runtime->timer_resolution : 0; } static int snd_pcm_timer_start(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = snd_timer_chip(timer); substream->timer_running = 1; return 0; } static int snd_pcm_timer_stop(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = snd_timer_chip(timer); substream->timer_running = 0; return 0; } static const struct snd_timer_hardware snd_pcm_timer = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_SLAVE, .resolution = 0, .ticks = 1, .c_resolution = snd_pcm_timer_resolution, .start = snd_pcm_timer_start, .stop = snd_pcm_timer_stop, }; /* * Init functions */ static void snd_pcm_timer_free(struct snd_timer *timer) { struct snd_pcm_substream *substream = timer->private_data; substream->timer = NULL; } void snd_pcm_timer_init(struct snd_pcm_substream *substream) { struct snd_timer_id tid; struct snd_timer *timer; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.dev_class = SNDRV_TIMER_CLASS_PCM; tid.card = substream->pcm->card->number; tid.device = substream->pcm->device; tid.subdevice = (substream->number << 1) | (substream->stream & 1); if (snd_timer_new(substream->pcm->card, "PCM", &tid, &timer) < 0) return; sprintf(timer->name, "PCM %s %i-%i-%i", snd_pcm_direction_name(substream->stream), tid.card, tid.device, tid.subdevice); timer->hw = snd_pcm_timer; if (snd_device_register(timer->card, timer) < 0) { snd_device_free(timer->card, timer); return; } timer->private_data = substream; timer->private_free = snd_pcm_timer_free; substream->timer = timer; } void snd_pcm_timer_done(struct snd_pcm_substream *substream) { if (substream->timer) { snd_device_free(substream->pcm->card, substream->timer); substream->timer = NULL; } } |
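snd_pcm_timer_resolution_change() above computes the period time in nanoseconds as period_size / rate, reducing both terms by their gcd first and halving the multiplier until mult * fsize no longer overflows. The standalone sketch below repeats that computation with example values (48000 Hz, 1024-frame periods), which works out to about 21333333 ns per period.

/*
 * Standalone sketch of the resolution computation above, with example
 * values: rate = 48000 Hz, period_size = 1024 frames.
 */
#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = a % b;
                a = b;
                b = t;
        }
        return a;
}

int main(void)
{
        unsigned long rate = 48000, fsize = 1024;   /* example values */
        unsigned long mult = 1000000000, post = 1, l;

        l = gcd(mult, rate);
        mult /= l;
        rate /= l;
        l = gcd(rate, fsize);
        rate /= l;
        fsize /= l;
        /* halve the multiplier until mult * fsize no longer overflows */
        while ((mult * fsize) / fsize != mult) {
                mult /= 2;
                post *= 2;
        }
        printf("timer resolution: %lu ns\n", (mult * fsize / rate) * post);
        return 0;
}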
// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Compaq iPAQ driver
 *
 * Copyright (C) 2001 - 2002
 * Ganesh Varadarajan <ganesh@veritas.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

#define KP_RETRIES 100

#define DRIVER_AUTHOR "Ganesh Varadarajan <ganesh@veritas.com>"
#define DRIVER_DESC "USB PocketPC PDA driver"

static int connect_retries = KP_RETRIES;
static int initial_wait;

/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty, struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds);
static int ipaq_startup(struct usb_serial *serial);

static const struct usb_device_id ipaq_id_table[] = {
        { USB_DEVICE(0x0104, 0x00BE) }, /* Socket USB Sync */
        { USB_DEVICE(0x03F0, 0x1016) }, /* HP USB Sync */
        { USB_DEVICE(0x03F0, 0x1116) }, /* HP USB Sync 1611 */
        { USB_DEVICE(0x03F0, 0x1216) }, /* HP USB Sync 1612 */
        { USB_DEVICE(0x03F0, 0x2016) }, /* HP USB Sync 1620 */
{ USB_DEVICE(0x03F0, 0x2116) }, /* HP USB Sync 1621 */ { USB_DEVICE(0x03F0, 0x2216) }, /* HP USB Sync 1622 */ { USB_DEVICE(0x03F0, 0x3016) }, /* HP USB Sync 1630 */ { USB_DEVICE(0x03F0, 0x3116) }, /* HP USB Sync 1631 */ { USB_DEVICE(0x03F0, 0x3216) }, /* HP USB Sync 1632 */ { USB_DEVICE(0x03F0, 0x4016) }, /* HP USB Sync 1640 */ { USB_DEVICE(0x03F0, 0x4116) }, /* HP USB Sync 1641 */ { USB_DEVICE(0x03F0, 0x4216) }, /* HP USB Sync 1642 */ { USB_DEVICE(0x03F0, 0x5016) }, /* HP USB Sync 1650 */ { USB_DEVICE(0x03F0, 0x5116) }, /* HP USB Sync 1651 */ { USB_DEVICE(0x03F0, 0x5216) }, /* HP USB Sync 1652 */ { USB_DEVICE(0x0409, 0x00D5) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x00D6) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x00D7) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x8024) }, /* NEC USB Sync */ { USB_DEVICE(0x0409, 0x8025) }, /* NEC USB Sync */ { USB_DEVICE(0x043E, 0x9C01) }, /* LGE USB Sync */ { USB_DEVICE(0x045E, 0x00CE) }, /* Microsoft USB Sync */ { USB_DEVICE(0x045E, 0x0400) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0401) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0402) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0403) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0404) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0405) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0406) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0407) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0408) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0409) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040A) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040B) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040C) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040D) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040E) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x040F) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0410) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0411) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0412) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0413) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0414) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0415) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0416) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0417) }, /* Windows Powered Pocket PC 2002 */ { USB_DEVICE(0x045E, 0x0432) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0433) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0434) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0435) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0436) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0437) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0438) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0439) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x043F) }, 
/* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0440) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0441) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0442) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0443) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0444) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0445) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0446) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0447) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0448) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0449) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x044F) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0450) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0451) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0452) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0453) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0454) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0455) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0456) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0457) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0458) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0459) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x045F) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0460) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0461) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0462) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0463) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0464) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0465) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0466) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0467) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0468) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0469) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046C) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046D) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046E) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x046F) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0470) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0471) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0472) }, /* Windows Powered Pocket PC 2003 
*/ { USB_DEVICE(0x045E, 0x0473) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0474) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0475) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0476) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0477) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0478) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x0479) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x047A) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x047B) }, /* Windows Powered Pocket PC 2003 */ { USB_DEVICE(0x045E, 0x04C8) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04C9) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CA) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CB) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CC) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CD) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04CE) }, /* Windows Powered Smartphone 2002 */ { USB_DEVICE(0x045E, 0x04D7) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04D8) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04D9) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DA) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DB) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DC) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DD) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DE) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04DF) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E0) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E1) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E2) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E3) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E4) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E5) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E6) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E7) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E8) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04E9) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x045E, 0x04EA) }, /* Windows Powered Smartphone 2003 */ { USB_DEVICE(0x049F, 0x0003) }, /* Compaq iPAQ USB Sync */ { USB_DEVICE(0x049F, 0x0032) }, /* Compaq iPAQ USB Sync */ { USB_DEVICE(0x04A4, 0x0014) }, /* Hitachi USB Sync */ { USB_DEVICE(0x04AD, 0x0301) }, /* USB Sync 0301 */ { USB_DEVICE(0x04AD, 0x0302) }, /* USB Sync 0302 */ { USB_DEVICE(0x04AD, 0x0303) }, /* USB Sync 0303 */ { USB_DEVICE(0x04AD, 0x0306) }, /* GPS Pocket PC USB Sync */ { USB_DEVICE(0x04B7, 0x0531) }, /* MyGuide 7000 XL USB Sync */ { USB_DEVICE(0x04C5, 0x1058) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */ { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */ { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */ { USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */ { USB_DEVICE(0x04DD, 0x91AC) }, /* SHARP WS011SH USB Modem */ { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO 
USB Sync */ { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F03) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F04) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x6611) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6613) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6615) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6617) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6619) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x661B) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x662E) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6630) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04E8, 0x6632) }, /* Samsung MITs USB Sync */ { USB_DEVICE(0x04f1, 0x3011) }, /* JVC USB Sync */ { USB_DEVICE(0x04F1, 0x3012) }, /* JVC USB Sync */ { USB_DEVICE(0x0502, 0x1631) }, /* c10 Series */ { USB_DEVICE(0x0502, 0x1632) }, /* c20 Series */ { USB_DEVICE(0x0502, 0x16E1) }, /* Acer n10 Handheld USB Sync */ { USB_DEVICE(0x0502, 0x16E2) }, /* Acer n20 Handheld USB Sync */ { USB_DEVICE(0x0502, 0x16E3) }, /* Acer n30 Handheld USB Sync */ { USB_DEVICE(0x0536, 0x01A0) }, /* HHP PDT */ { USB_DEVICE(0x0543, 0x0ED9) }, /* ViewSonic Color Pocket PC V35 */ { USB_DEVICE(0x0543, 0x1527) }, /* ViewSonic Color Pocket PC V36 */ { USB_DEVICE(0x0543, 0x1529) }, /* ViewSonic Color Pocket PC V37 */ { USB_DEVICE(0x0543, 0x152B) }, /* ViewSonic Color Pocket PC V38 */ { USB_DEVICE(0x0543, 0x152E) }, /* ViewSonic Pocket PC */ { USB_DEVICE(0x0543, 0x1921) }, /* ViewSonic Communicator Pocket PC */ { USB_DEVICE(0x0543, 0x1922) }, /* ViewSonic Smartphone */ { USB_DEVICE(0x0543, 0x1923) }, /* ViewSonic Pocket PC V30 */ { USB_DEVICE(0x05E0, 0x2000) }, /* Symbol USB Sync */ { USB_DEVICE(0x05E0, 0x2001) }, /* Symbol USB Sync 0x2001 */ { USB_DEVICE(0x05E0, 0x2002) }, /* Symbol USB Sync 0x2002 */ { USB_DEVICE(0x05E0, 0x2003) }, /* Symbol USB Sync 0x2003 */ { USB_DEVICE(0x05E0, 0x2004) }, /* Symbol USB Sync 0x2004 */ { USB_DEVICE(0x05E0, 0x2005) }, /* Symbol USB Sync 0x2005 */ { USB_DEVICE(0x05E0, 0x2006) }, /* Symbol USB Sync 0x2006 */ { USB_DEVICE(0x05E0, 0x2007) }, /* Symbol USB Sync 0x2007 */ { USB_DEVICE(0x05E0, 0x2008) }, /* Symbol USB Sync 0x2008 */ { USB_DEVICE(0x05E0, 0x2009) }, /* Symbol USB Sync 0x2009 */ { USB_DEVICE(0x05E0, 0x200A) }, /* Symbol USB Sync 0x200A */ { USB_DEVICE(0x067E, 0x1001) }, /* Intermec Mobile Computer */ { USB_DEVICE(0x07CF, 0x2001) }, /* CASIO USB Sync 2001 */ { USB_DEVICE(0x07CF, 0x2002) }, /* CASIO USB Sync 2002 */ { USB_DEVICE(0x07CF, 0x2003) }, /* CASIO USB Sync 2003 */ { USB_DEVICE(0x0930, 0x0700) }, /* TOSHIBA USB Sync 0700 */ { USB_DEVICE(0x0930, 0x0705) }, /* TOSHIBA Pocket PC e310 */ { USB_DEVICE(0x0930, 0x0706) }, /* TOSHIBA Pocket PC e740 */ { USB_DEVICE(0x0930, 0x0707) }, /* TOSHIBA Pocket PC e330 Series */ { USB_DEVICE(0x0930, 0x0708) }, /* TOSHIBA Pocket PC e350 Series */ { USB_DEVICE(0x0930, 0x0709) }, /* TOSHIBA Pocket PC e750 Series */ { USB_DEVICE(0x0930, 0x070A) }, /* TOSHIBA Pocket PC e400 Series */ { USB_DEVICE(0x0930, 0x070B) }, /* TOSHIBA Pocket PC e800 Series */ { USB_DEVICE(0x094B, 0x0001) }, /* Linkup Systems USB Sync */ { USB_DEVICE(0x0960, 0x0065) }, /* BCOM USB Sync 0065 */ { USB_DEVICE(0x0960, 0x0066) }, /* BCOM USB Sync 0066 */ { USB_DEVICE(0x0960, 0x0067) }, /* BCOM USB Sync 0067 */ { USB_DEVICE(0x0961, 0x0010) }, /* Portatec USB Sync */ { USB_DEVICE(0x099E, 0x0052) }, /* Trimble GeoExplorer */ { USB_DEVICE(0x099E, 0x4000) }, /* TDS Data Collector */ { USB_DEVICE(0x0B05, 0x4200) 
}, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x4201) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x4202) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x420F) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x9200) }, /* ASUS USB Sync */ { USB_DEVICE(0x0B05, 0x9202) }, /* ASUS USB Sync */ { USB_DEVICE(0x0BB4, 0x00CE) }, /* HTC USB Sync */ { USB_DEVICE(0x0BB4, 0x00CF) }, /* HTC USB Modem */ { USB_DEVICE(0x0BB4, 0x0A01) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A02) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A03) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A04) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A05) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A06) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A07) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A08) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A09) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A0F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A10) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A11) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A12) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A13) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A14) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A15) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A16) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A17) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A18) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A19) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A1F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A20) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A21) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A22) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A23) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A24) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A25) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A26) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A27) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A28) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A29) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A2F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A30) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A31) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A32) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A33) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A34) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A35) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A36) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A37) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A38) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A39) }, 
/* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A3F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A40) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A41) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A42) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A43) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A44) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A45) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A46) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A47) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A48) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A49) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4A) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4B) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4C) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4D) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4E) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A4F) }, /* PocketPC USB Sync */ { USB_DEVICE(0x0BB4, 0x0A50) }, /* HTC SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A51) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A52) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A53) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A54) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A55) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A56) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A57) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A58) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A59) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A5F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A60) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A61) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A62) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A63) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A64) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A65) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A66) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A67) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A68) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A69) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A6F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A70) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A71) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A72) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A73) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A74) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A75) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A76) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A77) }, 
/* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A78) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A79) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A7F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A80) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A81) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A82) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A83) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A84) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A85) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A86) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A87) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A88) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A89) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A8F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A90) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A91) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A92) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A93) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A94) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A95) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A96) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A97) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A98) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A99) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9A) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9B) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9C) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9D) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9E) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0A9F) }, /* SmartPhone USB Sync */ { USB_DEVICE(0x0BB4, 0x0BCE) }, /* "High Tech Computer Corp" */ { USB_DEVICE(0x0BF8, 0x1001) }, /* Fujitsu Siemens Computers USB Sync */ { USB_DEVICE(0x0C44, 0x03A2) }, /* Motorola iDEN Smartphone */ { USB_DEVICE(0x0C8E, 0x6000) }, /* Cesscom Luxian Series */ { USB_DEVICE(0x0CAD, 0x9001) }, /* Motorola PowerPad Pocket PC Device */ { USB_DEVICE(0x0F4E, 0x0200) }, /* Freedom Scientific USB Sync */ { USB_DEVICE(0x0F98, 0x0201) }, /* Cyberbank USB Sync */ { USB_DEVICE(0x0FB8, 0x3001) }, /* Wistron USB Sync */ { USB_DEVICE(0x0FB8, 0x3002) }, /* Wistron USB Sync */ { USB_DEVICE(0x0FB8, 0x3003) }, /* Wistron USB Sync */ { USB_DEVICE(0x0FB8, 0x4001) }, /* Wistron USB Sync */ { USB_DEVICE(0x1066, 0x00CE) }, /* E-TEN USB Sync */ { USB_DEVICE(0x1066, 0x0300) }, /* E-TEN P3XX Pocket PC */ { USB_DEVICE(0x1066, 0x0500) }, /* E-TEN P5XX Pocket PC */ { USB_DEVICE(0x1066, 0x0600) }, /* E-TEN P6XX Pocket PC */ { USB_DEVICE(0x1066, 0x0700) }, /* E-TEN P7XX Pocket PC */ { USB_DEVICE(0x1114, 0x0001) }, /* Psion Teklogix Sync 753x */ { USB_DEVICE(0x1114, 0x0004) }, /* Psion Teklogix Sync netBookPro */ { USB_DEVICE(0x1114, 0x0006) }, /* Psion Teklogix Sync 7525 */ { USB_DEVICE(0x1182, 0x1388) }, /* VES USB Sync 
*/ { USB_DEVICE(0x11D9, 0x1002) }, /* Rugged Pocket PC 2003 */ { USB_DEVICE(0x11D9, 0x1003) }, /* Rugged Pocket PC 2003 */ { USB_DEVICE(0x1231, 0xCE01) }, /* USB Sync 03 */ { USB_DEVICE(0x1231, 0xCE02) }, /* USB Sync 03 */ { USB_DEVICE(0x1690, 0x0601) }, /* Askey USB Sync */ { USB_DEVICE(0x22B8, 0x4204) }, /* Motorola MPx200 Smartphone */ { USB_DEVICE(0x22B8, 0x4214) }, /* Motorola MPc GSM */ { USB_DEVICE(0x22B8, 0x4224) }, /* Motorola MPx220 Smartphone */ { USB_DEVICE(0x22B8, 0x4234) }, /* Motorola MPc CDMA */ { USB_DEVICE(0x22B8, 0x4244) }, /* Motorola MPx100 Smartphone */ { USB_DEVICE(0x3340, 0x011C) }, /* Mio DigiWalker PPC StrongARM */ { USB_DEVICE(0x3340, 0x0326) }, /* Mio DigiWalker 338 */ { USB_DEVICE(0x3340, 0x0426) }, /* Mio DigiWalker 338 */ { USB_DEVICE(0x3340, 0x043A) }, /* Mio DigiWalker USB Sync */ { USB_DEVICE(0x3340, 0x051C) }, /* MiTAC USB Sync 528 */ { USB_DEVICE(0x3340, 0x053A) }, /* Mio DigiWalker SmartPhone USB Sync */ { USB_DEVICE(0x3340, 0x071C) }, /* MiTAC USB Sync */ { USB_DEVICE(0x3340, 0x0B1C) }, /* Generic PPC StrongARM */ { USB_DEVICE(0x3340, 0x0E3A) }, /* Generic PPC USB Sync */ { USB_DEVICE(0x3340, 0x0F1C) }, /* Itautec USB Sync */ { USB_DEVICE(0x3340, 0x0F3A) }, /* Generic SmartPhone USB Sync */ { USB_DEVICE(0x3340, 0x1326) }, /* Itautec USB Sync */ { USB_DEVICE(0x3340, 0x191C) }, /* YAKUMO USB Sync */ { USB_DEVICE(0x3340, 0x2326) }, /* Vobis USB Sync */ { USB_DEVICE(0x3340, 0x3326) }, /* MEDION Winodws Moble USB Sync */ { USB_DEVICE(0x3708, 0x20CE) }, /* Legend USB Sync */ { USB_DEVICE(0x3708, 0x21CE) }, /* Lenovo USB Sync */ { USB_DEVICE(0x4113, 0x0210) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x4113, 0x0211) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x4113, 0x0400) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x4113, 0x0410) }, /* Mobile Media Technology USB Sync */ { USB_DEVICE(0x413C, 0x4001) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4002) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4003) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4004) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4005) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4006) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4007) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4008) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x413C, 0x4009) }, /* Dell Axim USB Sync */ { USB_DEVICE(0x4505, 0x0010) }, /* Smartphone */ { USB_DEVICE(0x5E04, 0xCE00) }, /* SAGEM Wireless Assistant */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ipaq_id_table); /* All of the device info needed for the Compaq iPAQ */ static struct usb_serial_driver ipaq_device = { .driver = { .name = "ipaq", }, .description = "PocketPC PDA", .id_table = ipaq_id_table, .bulk_in_size = 256, .bulk_out_size = 256, .open = ipaq_open, .attach = ipaq_startup, .calc_num_ports = ipaq_calc_num_ports, }; static struct usb_serial_driver * const serial_drivers[] = { &ipaq_device, NULL }; static int ipaq_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; int result = 0; int retries = connect_retries; msleep(1000*initial_wait); /* * Send out control message observed in win98 sniffs. Not sure what * it does, but from empirical observations, it seems that the device * will start the chat sequence once one of these messages gets * through. Since this has a reasonably high failure rate, we retry * several times. 
*/ while (retries) { retries--; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 0x1, 0, NULL, 0, 100); if (!result) break; msleep(1000); } if (!retries && result) { dev_err(&port->dev, "%s - failed doing control urb, error %d\n", __func__, result); return result; } return usb_serial_generic_open(tty, port); } static int ipaq_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { /* * Some of the devices in ipaq_id_table[] are composite, and we * shouldn't bind to all the interfaces. This test will rule out * some obviously invalid possibilities. */ if (epds->num_bulk_in == 0 || epds->num_bulk_out == 0) return -ENODEV; /* * A few devices have four endpoints, seemingly Yakuma devices, and * we need the second pair. */ if (epds->num_bulk_in > 1 && epds->num_bulk_out > 1) { epds->bulk_in[0] = epds->bulk_in[1]; epds->bulk_out[0] = epds->bulk_out[1]; } /* * Other devices have 3 endpoints, but we only use the first bulk in * and out endpoints. */ epds->num_bulk_in = 1; epds->num_bulk_out = 1; return 1; } static int ipaq_startup(struct usb_serial *serial) { if (serial->dev->actconfig->desc.bConfigurationValue != 1) { /* * FIXME: HP iPaq rx3715, possibly others, have 1 config that * is labeled as 2 */ dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); return -ENODEV; } return usb_reset_configuration(serial->dev); } module_usb_serial_driver(serial_drivers, ipaq_id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(connect_retries, int, 0644); MODULE_PARM_DESC(connect_retries, "Maximum number of connect retries (one second each)"); module_param(initial_wait, int, 0644); MODULE_PARM_DESC(initial_wait, "Time to wait before attempting a connection (in seconds)"); |
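ipaq_open() above retries a zero-length control transfer with bmRequestType 0x21 and bRequest 0x22 until the device responds. As a reading aid, the standalone sketch below decodes that bmRequestType byte according to the USB 2.0 bit layout (bit 7 direction, bits 6:5 type, bits 4:0 recipient); it does not try to interpret bRequest 0x22 beyond what the driver comment says.

/*
 * Standalone sketch: decode the bmRequestType byte (0x21) used by
 * ipaq_open() above, per the USB 2.0 layout.
 */
#include <stdio.h>

int main(void)
{
        unsigned int reqtype = 0x21;    /* value passed to usb_control_msg() */

        printf("direction: %s\n", (reqtype & 0x80) ? "device-to-host" : "host-to-device");
        printf("type:      %u (0=standard, 1=class, 2=vendor)\n", (reqtype >> 5) & 0x3);
        printf("recipient: %u (0=device, 1=interface, 2=endpoint)\n", reqtype & 0x1f);
        return 0;
}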
| 75 29 57 75 29 57 15 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_SUPER_H #define _LINUX_FS_SUPER_H #include <linux/fs/super_types.h> #include <linux/unicode.h> /* * These are internal functions, please use sb_start_{write,pagefault,intwrite} * instead. */ static inline void __sb_end_write(struct super_block *sb, int level) { percpu_up_read(sb->s_writers.rw_sem + level - 1); } static inline void __sb_start_write(struct super_block *sb, int level) { percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true); } static inline bool __sb_start_write_trylock(struct super_block *sb, int level) { return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1); } #define __sb_writers_acquired(sb, lev) \ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev) - 1], 1, _THIS_IP_) #define __sb_writers_release(sb, lev) \ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev) - 1], _THIS_IP_) /** * __sb_write_started - check if sb freeze level is held * @sb: the super we write to * @level: the freeze level * * * > 0 - sb freeze level is held * * 0 - sb freeze level is not held * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN */ static inline int __sb_write_started(const struct super_block *sb, int level) { return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1); } /** * sb_write_started - check if SB_FREEZE_WRITE is held * @sb: the super we write to * * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN. */ static inline bool sb_write_started(const struct super_block *sb) { return __sb_write_started(sb, SB_FREEZE_WRITE); } /** * sb_write_not_started - check if SB_FREEZE_WRITE is not held * @sb: the super we write to * * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN. */ static inline bool sb_write_not_started(const struct super_block *sb) { return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0; } /** * sb_end_write - drop write access to a superblock * @sb: the super we wrote to * * Decrement number of writers to the filesystem. Wake up possible waiters * wanting to freeze the filesystem. */ static inline void sb_end_write(struct super_block *sb) { __sb_end_write(sb, SB_FREEZE_WRITE); } /** * sb_end_pagefault - drop write access to a superblock from a page fault * @sb: the super we wrote to * * Decrement number of processes handling write page fault to the filesystem. * Wake up possible waiters wanting to freeze the filesystem. */ static inline void sb_end_pagefault(struct super_block *sb) { __sb_end_write(sb, SB_FREEZE_PAGEFAULT); } /** * sb_end_intwrite - drop write access to a superblock for internal fs purposes * @sb: the super we wrote to * * Decrement fs-internal number of writers to the filesystem. 
 * Wake up possible waiters wanting to freeze the filesystem.
 */
static inline void sb_end_intwrite(struct super_block *sb)
{
	__sb_end_write(sb, SB_FREEZE_FS);
}

/**
 * sb_start_write - get write access to a superblock
 * @sb: the super we write to
 *
 * When a process wants to write data or metadata to a file system (i.e. dirty
 * a page or an inode), it should embed the operation in a sb_start_write() -
 * sb_end_write() pair to get exclusion against file system freezing. This
 * function increments number of writers preventing freezing. If the file
 * system is already frozen, the function waits until the file system is
 * thawed.
 *
 * Since freeze protection behaves as a lock, users have to preserve
 * ordering of freeze protection and other filesystem locks. Generally,
 * freeze protection should be the outermost lock. In particular, we have:
 *
 * sb_start_write
 *   -> i_rwsem (write path, truncate, directory ops, ...)
 *   -> s_umount (freeze_super, thaw_super)
 */
static inline void sb_start_write(struct super_block *sb)
{
	__sb_start_write(sb, SB_FREEZE_WRITE);
}

DEFINE_GUARD(super_write, struct super_block *,
	     sb_start_write(_T), sb_end_write(_T))

static inline bool sb_start_write_trylock(struct super_block *sb)
{
	return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}

/**
 * sb_start_pagefault - get write access to a superblock from a page fault
 * @sb: the super we write to
 *
 * When a process starts handling write page fault, it should embed the
 * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
 * exclusion against file system freezing. This is needed since the page fault
 * is going to dirty a page. This function increments number of running page
 * faults preventing freezing. If the file system is already frozen, the
 * function waits until the file system is thawed.
 *
 * Since page fault freeze protection behaves as a lock, users have to preserve
 * ordering of freeze protection and other filesystem locks. It is advised to
 * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
 * handling code implies lock dependency:
 *
 * mmap_lock
 *   -> sb_start_pagefault
 */
static inline void sb_start_pagefault(struct super_block *sb)
{
	__sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}

/**
 * sb_start_intwrite - get write access to a superblock for internal fs purposes
 * @sb: the super we write to
 *
 * This is the third level of protection against filesystem freezing. It is
 * free for use by a filesystem. The only requirement is that it must rank
 * below sb_start_pagefault.
 *
 * For example filesystem can call sb_start_intwrite() when starting a
 * transaction which somewhat eases handling of freezing for internal sources
 * of filesystem changes (internal fs threads, discarding preallocation on file
 * close, etc.).
 */
static inline void sb_start_intwrite(struct super_block *sb)
{
	__sb_start_write(sb, SB_FREEZE_FS);
}

static inline bool sb_start_intwrite_trylock(struct super_block *sb)
{
	return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}

static inline bool sb_rdonly(const struct super_block *sb)
{
	return sb->s_flags & SB_RDONLY;
}

static inline bool sb_is_blkdev_sb(struct super_block *sb)
{
	return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
}

#if IS_ENABLED(CONFIG_UNICODE)
static inline struct unicode_map *sb_encoding(const struct super_block *sb)
{
	return sb->s_encoding;
}

/* Compare if two super blocks have the same encoding and flags */
static inline bool sb_same_encoding(const struct super_block *sb1,
				    const struct super_block *sb2)
{
	if (sb1->s_encoding == sb2->s_encoding)
		return true;

	return (sb1->s_encoding && sb2->s_encoding &&
		(sb1->s_encoding->version == sb2->s_encoding->version) &&
		(sb1->s_encoding_flags == sb2->s_encoding_flags));
}
#else
static inline struct unicode_map *sb_encoding(const struct super_block *sb)
{
	return NULL;
}

static inline bool sb_same_encoding(const struct super_block *sb1,
				    const struct super_block *sb2)
{
	return true;
}
#endif

static inline bool sb_has_encoding(const struct super_block *sb)
{
	return !!sb_encoding(sb);
}

int sb_set_blocksize(struct super_block *sb, int size);
int __must_check sb_min_blocksize(struct super_block *sb, int size);

int freeze_super(struct super_block *super, enum freeze_holder who,
		 const void *freeze_owner);
int thaw_super(struct super_block *super, enum freeze_holder who,
	       const void *freeze_owner);

#endif /* _LINUX_FS_SUPER_H */
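The freeze-protection helpers above are meant to be used as bracketed pairs around anything that dirties the filesystem. A minimal usage sketch, purely illustrative (the function below is not part of this header), showing the SB_FREEZE_WRITE level and the lock ordering documented at sb_start_write():

/* Illustrative only: bracket an operation that dirties the filesystem. */
static int example_dirty_something(struct super_block *sb)
{
	sb_start_write(sb);	/* blocks here while the filesystem is frozen */
	/*
	 * ... dirty a page or an inode; take i_rwsem and other fs locks
	 * inside this bracket, per the ordering comment above ...
	 */
	sb_end_write(sb);	/* wakes a waiter in freeze_super(), if any */

	return 0;
}

Given the DEFINE_GUARD(super_write, ...) declaration above, the same pairing should also be expressible as a scope-based guard(super_write)(sb); that releases on function exit, though that is an assumption about how the guard is meant to be consumed rather than something this header spells out.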
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#ifndef __LINUX_PAGE_TABLE_CHECK_H
#define __LINUX_PAGE_TABLE_CHECK_H

#ifdef CONFIG_PAGE_TABLE_CHECK
#include <linux/jump_label.h>

extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;

void __page_table_check_zero(struct page *page, unsigned int order);
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
				 unsigned int nr);
void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
				 unsigned int nr);
void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
				 unsigned int nr);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd);

static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_zero(page, order);
}

static inline void page_table_check_free(struct page *page, unsigned int order)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_zero(page, order);
}

static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pte_clear(mm, pte);
}

static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pmd_clear(mm, pmd);
}

static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pud_clear(mm, pud);
}

static inline void page_table_check_ptes_set(struct mm_struct *mm,
					     pte_t *ptep, pte_t pte, unsigned int nr)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_ptes_set(mm, ptep, pte, nr);
}

static inline void page_table_check_pmds_set(struct mm_struct *mm,
					     pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pmds_set(mm, pmdp, pmd, nr);
}

static inline void page_table_check_puds_set(struct mm_struct *mm,
					     pud_t *pudp, pud_t pud, unsigned int nr)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_puds_set(mm, pudp, pud, nr);
}

static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
						    unsigned long addr,
						    pmd_t pmd)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pte_clear_range(mm, addr, pmd);
}

#else

static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
}

static inline void page_table_check_free(struct page *page, unsigned int order)
{
}

static inline void
page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
}

static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
}

static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
}

static inline void page_table_check_ptes_set(struct mm_struct *mm,
					     pte_t *ptep, pte_t pte, unsigned int nr)
{
}

static inline void page_table_check_pmds_set(struct mm_struct *mm,
					     pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
}

static inline void page_table_check_puds_set(struct mm_struct *mm,
					     pud_t *pudp, pud_t pud, unsigned int nr)
{
}

static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
						    unsigned long addr,
						    pmd_t pmd)
{
}

#endif /* CONFIG_PAGE_TABLE_CHECK */

#define page_table_check_pmd_set(mm, pmdp, pmd)	page_table_check_pmds_set(mm, pmdp, pmd, 1)
#define page_table_check_pud_set(mm, pudp, pud)	page_table_check_puds_set(mm, pudp, pud, 1)

#endif /* __LINUX_PAGE_TABLE_CHECK_H */
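Every wrapper above funnels through the same static_branch_likely(&page_table_check_disabled) test, so with the feature compiled in but left disabled (the static_key_true default) each hook costs a single patched branch. The sketch below shows where an architecture-level PTE setter would call the hook; the function is illustrative and deliberately simplified (a real set_ptes() advances the pfn for each entry and uses the architecture's own store primitive):

/* Illustrative only: call the check hook before installing new entries. */
static inline void example_set_ptes(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);	/* ownership checks */
	for (; nr--; ptep++)
		*ptep = pte;	/* simplified write-back of the entries */
}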
// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
 *
 * Copyright (C) 2007-2010 SMSC
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/bitrev.h>
#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/of_net.h>
#include "smsc75xx.h"

#define SMSC_CHIPNAME			"smsc75xx"
#define SMSC_DRIVER_VERSION		"1.0.0"
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)
#define DEFAULT_HS_BURST_CAP_SIZE	(16 * 1024 + 5 * HS_USB_PKT_SIZE)
#define DEFAULT_FS_BURST_CAP_SIZE	(6 * 1024 + 33 * FS_USB_PKT_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x00002000)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define LAN75XX_EEPROM_MAGIC		(0x7500)
#define EEPROM_MAC_OFFSET		(0x01)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define SMSC75XX_INTERNAL_PHY_ID	(1)
#define SMSC75XX_TX_OVERHEAD		(8)
#define MAX_RX_FIFO_SIZE		(20 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define USB_VENDOR_ID_SMSC		(0x0424)
#define USB_PRODUCT_ID_LAN7500		(0x7500)
#define USB_PRODUCT_ID_LAN7505		(0x7505)
#define RXW_PADDING			2

#define SUPPORTED_WAKE			(WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
					 WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)

#define SUSPEND_SUSPEND0		(0x01)
#define SUSPEND_SUSPEND1		(0x02)
#define SUSPEND_SUSPEND2		(0x04)
#define SUSPEND_SUSPEND3		(0x08)
#define SUSPEND_ALLMODES		(SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
					 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)

struct smsc75xx_priv {
	struct usbnet *dev;
	u32 rfe_ctl;
	u32 wolopts;
	u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
	struct mutex dataport_mutex;
	spinlock_t rfe_ctl_lock;
	struct work_struct set_multicast;
	u8 suspend_flags;
};

static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");

static int smsc75xx_link_ok_nopm(struct usbnet *dev);
static int smsc75xx_phy_gig_workaround(struct usbnet *dev);

static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
					    u32 *data, int in_pm)
{
	u32 buf;
	int ret;
	int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);

	BUG_ON(!dev);

	if (!in_pm)
		fn = usbnet_read_cmd;
	else
		fn = usbnet_read_cmd_nopm;

	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4);
	if (unlikely(ret < 4)) {
		ret = ret < 0 ?
ret : -ENODATA; netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; } le32_to_cpus(&buf); *data = buf; return ret; } static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data, int in_pm) { u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); BUG_ON(!dev); if (!in_pm) fn = usbnet_write_cmd; else fn = usbnet_write_cmd_nopm; buf = data; cpu_to_le32s(&buf); ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); return ret; } static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index, u32 *data) { return __smsc75xx_read_reg(dev, index, data, 1); } static int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index, u32 data) { return __smsc75xx_write_reg(dev, index, data, 1); } static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data) { return __smsc75xx_read_reg(dev, index, data, 0); } static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data) { return __smsc75xx_write_reg(dev, index, data, 0); } /* Loop until the read is completed with timeout * called with phy_mutex held */ static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev, int in_pm) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } if (!(val & MII_ACCESS_BUSY)) return 0; } while (!time_after(jiffies, start_time + HZ)); return -EIO; } static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx, int in_pm) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_read\n"); goto done; } /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_READ | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_ACCESS\n"); goto done; } ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); goto done; } ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } ret = (u16)(val & 0xFFFF); done: mutex_unlock(&dev->phy_mutex); return ret; } static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval, int in_pm) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_write\n"); goto done; } val = regval; ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << 
MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_WRITE | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_ACCESS\n"); goto done; } ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); goto done; } done: mutex_unlock(&dev->phy_mutex); } static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id, int idx) { return __smsc75xx_mdio_read(netdev, phy_id, idx, 1); } static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id, int idx, int regval) { __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1); } static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) { return __smsc75xx_mdio_read(netdev, phy_id, idx, 0); } static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval) { __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0); } static int smsc75xx_wait_eeprom(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_CMD\n"); return ret; } if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) break; udelay(40); } while (!time_after(jiffies, start_time + HZ)); if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { netdev_warn(dev->net, "EEPROM read operation timeout\n"); return -EIO; } return 0; } static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_CMD\n"); return ret; } if (!(val & E2P_CMD_BUSY)) return 0; udelay(40); } while (!time_after(jiffies, start_time + HZ)); netdev_warn(dev->net, "EEPROM is busy\n"); return -EIO; } static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; for (i = 0; i < length; i++) { val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; ret = smsc75xx_read_reg(dev, E2P_DATA, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_DATA\n"); return ret; } data[i] = val & 0xFF; offset++; } return 0; } static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; /* Issue write/erase enable command */ val = E2P_CMD_BUSY | E2P_CMD_EWEN; ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = smsc75xx_write_reg(dev, E2P_DATA, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_DATA\n"); return ret; } /* Send "write" command */ val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; offset++; } return 0; } static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) { int 
i, ret; for (i = 0; i < 100; i++) { u32 dp_sel; ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error reading DP_SEL\n"); return ret; } if (dp_sel & DP_SEL_DPRDY) return 0; udelay(40); } netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out\n"); return -EIO; } static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, u32 length, u32 *buf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 dp_sel; int i, ret; mutex_lock(&pdata->dataport_mutex); ret = smsc75xx_dataport_wait_not_busy(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_dataport_write busy on entry\n"); goto done; } ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error reading DP_SEL\n"); goto done; } dp_sel &= ~DP_SEL_RSEL; dp_sel |= ram_select; ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_SEL\n"); goto done; } for (i = 0; i < length; i++) { ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_ADDR\n"); goto done; } ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_DATA\n"); goto done; } ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_CMD\n"); goto done; } ret = smsc75xx_dataport_wait_not_busy(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_dataport_write timeout\n"); goto done; } } done: mutex_unlock(&pdata->dataport_mutex); return ret; } /* returns hash bit number for given MAC address */ static u32 smsc75xx_hash(char addr[ETH_ALEN]) { return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; } static void smsc75xx_deferred_multicast_write(struct work_struct *param) { struct smsc75xx_priv *pdata = container_of(param, struct smsc75xx_priv, set_multicast); struct usbnet *dev = pdata->dev; int ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) netdev_warn(dev->net, "Error writing RFE_CRL\n"); } static void smsc75xx_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int i; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); pdata->rfe_ctl &= ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF); pdata->rfe_ctl |= RFE_CTL_AB; for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) pdata->multicast_hash_table[i] = 0; if (dev->net->flags & IFF_PROMISC) { netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; } else if (dev->net->flags & IFF_ALLMULTI) { netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; } else if (!netdev_mc_empty(dev->net)) { struct netdev_hw_addr *ha; netif_dbg(dev, drv, dev->net, "receive multicast hash filter\n"); pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; netdev_for_each_mc_addr(ha, netdev) { u32 bitnum = smsc75xx_hash(ha->addr); pdata->multicast_hash_table[bitnum / 32] |= (1 << (bitnum % 32)); } } else { netif_dbg(dev, drv, dev->net, "receive own packets only\n"); pdata->rfe_ctl |= RFE_CTL_DPF; } spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* defer register writes to a sleepable context */ 
schedule_work(&pdata->set_multicast); } static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; int ret; if (duplex == DUPLEX_FULL) { u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); if (cap & FLOW_CTRL_TX) { flow = (FLOW_TX_FCEN | 0xFFFF); /* set fct_flow thresholds to 20% and 80% */ fct_flow = (8 << 8) | 32; } if (cap & FLOW_CTRL_RX) flow |= FLOW_RX_FCEN; netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n", (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); } else { netif_dbg(dev, link, dev->net, "half duplex\n"); } ret = smsc75xx_write_reg(dev, FLOW, flow); if (ret < 0) { netdev_warn(dev->net, "Error writing FLOW\n"); return ret; } ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); if (ret < 0) { netdev_warn(dev->net, "Error writing FCT_FLOW\n"); return ret; } return 0; } static int smsc75xx_link_reset(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; u16 lcladv, rmtadv; int ret; /* write to clear phy interrupt status */ smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, PHY_INT_SRC_CLEAR_ALL); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); if (ret < 0) { netdev_warn(dev->net, "Error writing INT_STS\n"); return ret; } mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n", ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv); return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); } static void smsc75xx_status(struct usbnet *dev, struct urb *urb) { u32 intdata; if (urb->actual_length != 4) { netdev_warn(dev->net, "unexpected urb length %d\n", urb->actual_length); return; } intdata = get_unaligned_le32(urb->transfer_buffer); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); if (intdata & INT_ENP_PHY_INT) usbnet_defer_kevent(dev, EVENT_LINK_RESET); else netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n", intdata); } static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) { return MAX_EEPROM_SIZE; } static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); ee->magic = LAN75XX_EEPROM_MAGIC; return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data); } static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); if (ee->magic != LAN75XX_EEPROM_MAGIC) { netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x\n", ee->magic); return -EINVAL; } return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); } static void smsc75xx_ethtool_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); wolinfo->supported = SUPPORTED_WAKE; wolinfo->wolopts = pdata->wolopts; } static int smsc75xx_ethtool_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); int ret; if (wolinfo->wolopts & ~SUPPORTED_WAKE) return -EINVAL; pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); 
if (ret < 0) netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret); return ret; } static const struct ethtool_ops smsc75xx_ethtool_ops = { .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, .get_eeprom = smsc75xx_ethtool_get_eeprom, .set_eeprom = smsc75xx_ethtool_set_eeprom, .get_wol = smsc75xx_ethtool_get_wol, .set_wol = smsc75xx_ethtool_set_wol, .get_link_ksettings = usbnet_get_link_ksettings_mii, .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(netdev); if (!netif_running(netdev)) return -EINVAL; return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static void smsc75xx_init_mac_address(struct usbnet *dev) { u8 addr[ETH_ALEN]; /* maybe the boot loader passed the MAC address in devicetree */ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) { if (is_valid_ether_addr(dev->net->dev_addr)) { /* device tree values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n"); return; } } /* try reading mac address from EEPROM */ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) { eth_hw_addr_set(dev->net, addr); if (is_valid_ether_addr(dev->net->dev_addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n"); return; } } /* no useful static MAC address found. generate a random one */ eth_hw_addr_random(dev->net); netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); } static int smsc75xx_set_mac_address(struct usbnet *dev) { u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 | dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24; u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); if (ret < 0) { netdev_warn(dev->net, "Failed to write RX_ADDRH: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); if (ret < 0) { netdev_warn(dev->net, "Failed to write RX_ADDRL: %d\n", ret); return ret; } addr_hi |= ADDR_FILTX_FB_VALID; ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); if (ret < 0) { netdev_warn(dev->net, "Failed to write ADDR_FILTX: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); if (ret < 0) netdev_warn(dev->net, "Failed to write ADDR_FILTX+4: %d\n", ret); return ret; } static int smsc75xx_phy_initialize(struct usbnet *dev) { int bmcr, ret, timeout = 0; /* Initialize MII structure */ dev->mii.dev = dev->net; dev->mii.mdio_read = smsc75xx_mdio_read; dev->mii.mdio_write = smsc75xx_mdio_write; dev->mii.phy_id_mask = 0x1f; dev->mii.reg_num_mask = 0x1f; dev->mii.supports_gmii = 1; dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; /* reset phy and wait for reset to complete */ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); do { msleep(10); bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); if (bmcr < 0) { netdev_warn(dev->net, "Error reading MII_BMCR\n"); return bmcr; } timeout++; } while ((bmcr & BMCR_RESET) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on PHY Reset\n"); return -EIO; } /* phy workaround for gig link */ smsc75xx_phy_gig_workaround(dev); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | 
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000, ADVERTISE_1000FULL); /* read and write to clear phy interrupt status */ ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); return ret; } smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, PHY_INT_MASK_DEFAULT); mii_nway_restart(&dev->mii); netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n"); return 0; } static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) { int ret = 0; u32 buf; bool rxenabled; ret = smsc75xx_read_reg(dev, MAC_RX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); return ret; } rxenabled = ((buf & MAC_RX_RXEN) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } if (rxenabled) { buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } } return 0; } static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); int ret; ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac rx frame length\n"); return ret; } return usbnet_change_mtu(netdev, new_mtu); } /* Enable or disable Rx checksum offload engine */ static int smsc75xx_set_features(struct net_device *netdev, netdev_features_t features) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int ret; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); if (features & NETIF_F_RXCSUM) pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM; else pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM); spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* it's racing here! 
*/ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Error writing RFE_CTL\n"); return ret; } return 0; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) { int timeout = 0; do { u32 buf; int ret; ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } if (buf & PMT_CTL_DEV_RDY) return 0; msleep(10); timeout++; } while (timeout < 100); netdev_warn(dev->net, "timeout waiting for device ready\n"); return -EIO; } static int smsc75xx_phy_gig_workaround(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; int ret = 0, timeout = 0; u32 buf, link_up = 0; /* Set the phy in Gig loopback */ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040); /* Wait for the link up */ do { link_up = smsc75xx_link_ok_nopm(dev); usleep_range(10000, 20000); timeout++; } while ((!link_up) && (timeout < 1000)); if (timeout >= 1000) { netdev_warn(dev->net, "Timeout waiting for PHY link up\n"); return -EIO; } /* phy reset */ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); return ret; } timeout = 0; do { usleep_range(10000, 20000); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); return -EIO; } return 0; } static int smsc75xx_reset(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 buf; int ret = 0, timeout; netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n"); ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_reset\n"); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } buf |= HW_CFG_LRST; ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } timeout++; } while ((buf & HW_CFG_LRST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on completion of Lite Reset\n"); return -EIO; } netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n"); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); return ret; } timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); return -EIO; } netif_dbg(dev, ifup, dev->net, "PHY reset complete\n"); ret = smsc75xx_set_mac_address(dev); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac address\n"); return ret; } netif_dbg(dev, ifup, dev->net, 
"MAC Address: %pM\n", dev->net->dev_addr); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n", buf); buf |= HW_CFG_BIR; ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n", buf); if (!turbo_mode) { buf = 0; dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE; } else if (dev->udev->speed == USB_SPEED_HIGH) { buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE; } else { buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; } netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size); ret = smsc75xx_write_reg(dev, BURST_CAP, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); if (ret < 0) { netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf); if (turbo_mode) { ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); buf |= (HW_CFG_MEF | HW_CFG_BCE); ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); } /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_RX_FIFO_END: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf); buf = (MAX_TX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_TX_FIFO_END: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); if (ret < 0) { netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, ID_REV, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, E2P_CMD, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read E2P_CMD: %d\n", ret); return ret; } /* only set default GPIO/LED settings if no EEPROM is detected */ if (!(buf & E2P_CMD_LOADED)) { ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); if (ret < 0) { 
netdev_warn(dev->net, "Failed to read LED_GPIO_CFG: %d\n", ret); return ret; } buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret); return ret; } } ret = smsc75xx_write_reg(dev, FLOW, 0); if (ret < 0) { netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_FLOW: %d\n", ret); return ret; } /* Don't need rfe_ctl_lock during initialisation */ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); return ret; } pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to write RFE_CTL: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n", pdata->rfe_ctl); /* Enable or disable checksum offload engines */ smsc75xx_set_features(dev->net, dev->net->features); smsc75xx_set_multicast(dev->net); ret = smsc75xx_phy_initialize(dev); if (ret < 0) { netdev_warn(dev->net, "Failed to initialize PHY: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret); return ret; } /* enable PHY interrupts */ buf |= INT_ENP_PHY_INT; ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret); return ret; } /* allow mac to detect speed and duplex from phy */ ret = smsc75xx_read_reg(dev, MAC_CR, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret); return ret; } buf |= (MAC_CR_ADD | MAC_CR_ASD); ret = smsc75xx_write_reg(dev, MAC_CR, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, MAC_TX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_TX: %d\n", ret); return ret; } buf |= MAC_TX_TXEN; ret = smsc75xx_write_reg(dev, MAC_TX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_TX: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read FCT_TX_CTL: %d\n", ret); return ret; } buf |= FCT_TX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_TX_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set max rx frame length\n"); return ret; } ret = smsc75xx_read_reg(dev, MAC_RX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); return ret; } buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read FCT_RX_CTL: %d\n", ret); return ret; } buf |= 
FCT_RX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_RX_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf); netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0\n"); return 0; } static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = smsc75xx_ioctl, .ndo_set_rx_mode = smsc75xx_set_multicast, .ndo_set_features = smsc75xx_set_features, }; static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = NULL; int ret; printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); ret = usbnet_get_endpoints(dev, intf); if (ret < 0) { netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret); return ret; } dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), GFP_KERNEL); pdata = (struct smsc75xx_priv *)(dev->data[0]); if (!pdata) return -ENOMEM; pdata->dev = dev; spin_lock_init(&pdata->rfe_ctl_lock); mutex_init(&pdata->dataport_mutex); INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); if (DEFAULT_TX_CSUM_ENABLE) dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; if (DEFAULT_RX_CSUM_ENABLE) dev->net->features |= NETIF_F_RXCSUM; dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); goto free_pdata; } smsc75xx_init_mac_address(dev); /* Init all registers */ ret = smsc75xx_reset(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); goto cancel_work; } dev->net->netdev_ops = &smsc75xx_netdev_ops; dev->net->ethtool_ops = &smsc75xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE; return 0; cancel_work: cancel_work_sync(&pdata->set_multicast); free_pdata: kfree(pdata); dev->data[0] = 0; return ret; } static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); if (pdata) { cancel_work_sync(&pdata->set_multicast); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); dev->data[0] = 0; } } static u16 smsc_crc(const u8 *buffer, size_t len) { return bitrev16(crc16(0xFFFF, buffer, len)); } static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg, u32 wuf_mask1) { int cfg_base = WUF_CFGX + filter * 4; int mask_base = WUF_MASKX + filter * 16; int ret; ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_CFGX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 4, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 8, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 12, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } 
return 0; } static int smsc75xx_enter_suspend0(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST)); val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND0; return 0; } static int smsc75xx_enter_suspend1(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_1; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND1; return 0; } static int smsc75xx_enter_suspend2(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_2; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND2; return 0; } static int smsc75xx_enter_suspend3(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading FCT_RX_CTL\n"); return ret; } if (val & FCT_RX_CTL_RXUSED) { netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n"); return -EBUSY; } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } /* clear wol status */ val &= ~PMT_CTL_WUPS; val |= PMT_CTL_WUPS_WOL; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND3; return 0; } static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) { struct mii_if_info *mii = &dev->mii; int ret; netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n"); /* read to clear */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); return ret; } /* enable interrupt source */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_MASK\n"); return ret; } ret |= mask; smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret); return 0; } 
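/*
 * The SUSPEND1/2/3 entry helpers above share one read-modify-write shape on
 * PMT_CTL: read it with the _nopm accessor, clear the SUS_MODE, WUPS and
 * PHY_RST fields, or in the bits for the target state, and write it back
 * before recording the state in pdata->suspend_flags.  The helper below is
 * only a condensed illustration of that shared sequence; it does not exist
 * in this driver.
 */
static int example_set_suspend_mode(struct usbnet *dev, u32 mode_bits)
{
	u32 val;
	int ret;

	ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
	if (ret < 0)
		return ret;

	val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
	val |= mode_bits;	/* e.g. PMT_CTL_SUS_MODE_2 */

	return smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
}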
static int smsc75xx_link_ok_nopm(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; int ret; /* first, a dummy read, needed to latch some MII phys */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_BMSR\n"); return ret; } ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_BMSR\n"); return ret; } return !!(ret & BMSR_LSTATUS); } static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up) { int ret; if (!netif_running(dev->net)) { /* interface is ifconfig down so fully power down hw */ netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n"); return smsc75xx_enter_suspend2(dev); } if (!link_up) { /* link is down so enter EDPD mode */ netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n"); /* enable PHY wakeup events for if cable is attached */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_ANEG_COMP); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); return ret; } netdev_info(dev->net, "entering SUSPEND1 mode\n"); return smsc75xx_enter_suspend1(dev); } /* enable PHY wakeup events so we remote wakeup if cable is pulled */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_LINK_DOWN); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); return ret; } netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n"); return smsc75xx_enter_suspend3(dev); } static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val, link_up; int ret; ret = usbnet_suspend(intf, message); if (ret < 0) { netdev_warn(dev->net, "usbnet_suspend error\n"); return ret; } if (pdata->suspend_flags) { netdev_warn(dev->net, "error during last resume\n"); pdata->suspend_flags = 0; } /* determine if link is up using only _nopm functions */ link_up = smsc75xx_link_ok_nopm(dev); if (message.event == PM_EVENT_AUTO_SUSPEND) { ret = smsc75xx_autosuspend(dev, link_up); goto done; } /* if we get this far we're not autosuspending */ /* if no wol options set, or if link is down and we're not waking on * PHY activity, enter lowest power SUSPEND2 mode */ if (!(pdata->wolopts & SUPPORTED_WAKE) || !(link_up || (pdata->wolopts & WAKE_PHY))) { netdev_info(dev->net, "entering SUSPEND2 mode\n"); /* disable energy detect (link up) & wake up events */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~(WUCSR_MPEN | WUCSR_WUEN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); goto done; } val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); goto done; } ret = smsc75xx_enter_suspend2(dev); goto done; } if (pdata->wolopts & WAKE_PHY) { ret = smsc75xx_enable_phy_wakeup_interrupts(dev, (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN)); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); goto done; } /* if link is down then configure EDPD and enter SUSPEND1, * otherwise enter SUSPEND0 below */ if (!link_up) { struct mii_if_info *mii = &dev->mii; netdev_info(dev->net, "entering SUSPEND1 mode\n"); /* enable 
energy detect power-down mode */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n"); goto done; } ret |= MODE_CTRL_STS_EDPWRDOWN; smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret); /* enter SUSPEND1 mode */ ret = smsc75xx_enter_suspend1(dev); goto done; } } if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) { int i, filter = 0; /* disable all filters */ for (i = 0; i < WUF_NUM; i++) { ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_CFGX\n"); goto done; } } if (pdata->wolopts & WAKE_MCAST) { const u8 mcast[] = {0x01, 0x00, 0x5E}; netdev_info(dev->net, "enabling multicast detection\n"); val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST | smsc_crc(mcast, 3); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007); if (ret < 0) { netdev_warn(dev->net, "Error writing wakeup filter\n"); goto done; } } if (pdata->wolopts & WAKE_ARP) { const u8 arp[] = {0x08, 0x06}; netdev_info(dev->net, "enabling ARP detection\n"); val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16) | smsc_crc(arp, 2); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003); if (ret < 0) { netdev_warn(dev->net, "Error writing wakeup filter\n"); goto done; } } /* clear any pending pattern match packet status */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUFR; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } netdev_info(dev->net, "enabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } else { netdev_info(dev->net, "disabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } /* disable magic, bcast & unicast wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } if (pdata->wolopts & WAKE_PHY) { netdev_info(dev->net, "enabling PHY wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); goto done; } /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); goto done; } } if (pdata->wolopts & WAKE_MAGIC) { netdev_info(dev->net, "enabling magic packet wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } /* clear any pending magic packet status */ val |= WUCSR_MPR | WUCSR_MPEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } if (pdata->wolopts & WAKE_BCAST) { 
netdev_info(dev->net, "enabling broadcast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_BCAST_FR | WUCSR_BCST_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } if (pdata->wolopts & WAKE_UCAST) { netdev_info(dev->net, "enabling unicast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUFR | WUCSR_PFDA_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } /* enable receiver to enable frame reception */ ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); goto done; } val |= MAC_RX_RXEN; ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); goto done; } /* some wol options are enabled, so enter SUSPEND0 */ netdev_info(dev->net, "entering SUSPEND0 mode\n"); ret = smsc75xx_enter_suspend0(dev); done: /* * TODO: resume() might need to handle the suspend failure * in system sleep */ if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); return ret; } static int smsc75xx_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u8 suspend_flags = pdata->suspend_flags; int ret; u32 val; netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags); /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; if (suspend_flags & SUSPEND_ALLMODES) { /* Disable wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); return ret; } val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN | WUCSR_BCST_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); return ret; } /* clear wake-up status */ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~PMT_CTL_WOL_EN; val |= PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } } if (suspend_flags & SUSPEND_SUSPEND2) { netdev_info(dev->net, "resuming from SUSPEND2\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val |= PMT_CTL_PHY_PWRUP; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } } ret = smsc75xx_wait_ready(dev, 1); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_resume\n"); return ret; } return usbnet_resume(intf); } static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { if (!(dev->net->features & NETIF_F_RXCSUM) || unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); skb->ip_summed = CHECKSUM_COMPLETE; } } static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) return 0; while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, 
align_count, size; struct sk_buff *ax_skb; unsigned char *packet; rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, 4); rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, 4 + RXW_PADDING); packet = skb->data; /* get the packet length */ size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; if (unlikely(size > skb->len)) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); dev->net->stats.rx_errors++; dev->net->stats.rx_dropped++; if (rx_cmd_a & RX_CMD_A_FCS) dev->net->stats.rx_crc_errors++; else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } /* last frame in this batch */ if (skb->len == size) { smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a, rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ return 1; } /* Use "size - 4" to remove fcs */ ax_skb = netdev_alloc_skb_ip_align(dev->net, size - 4); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb\n"); return 0; } skb_put(ax_skb, size - 4); memcpy(ax_skb->data, packet, size - 4); smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a, rx_cmd_b); usbnet_skb_return(dev, ax_skb); } skb_pull(skb, size); /* padding bytes before the next frame starts */ if (skb->len) skb_pull(skb, align_count); } return 1; } static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; void *ptr; if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { dev_kfree_skb_any(skb); return NULL; } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; if (skb->ip_summed == CHECKSUM_PARTIAL) tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE; if (skb_is_gso(skb)) { u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN); tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS; tx_cmd_a |= TX_CMD_A_LSO; } else { tx_cmd_b = 0; } ptr = skb_push(skb, 8); put_unaligned_le32(tx_cmd_a, ptr); put_unaligned_le32(tx_cmd_b, ptr + 4); return skb; } static int smsc75xx_manage_power(struct usbnet *dev, int on) { dev->intf->needs_remote_wakeup = on; return 0; } static const struct driver_info smsc75xx_info = { .description = "smsc75xx USB 2.0 Gigabit Ethernet", .bind = smsc75xx_bind, .unbind = smsc75xx_unbind, .link_reset = smsc75xx_link_reset, .reset = smsc75xx_reset, .rx_fixup = smsc75xx_rx_fixup, .tx_fixup = smsc75xx_tx_fixup, .status = smsc75xx_status, .manage_power = smsc75xx_manage_power, .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; static const struct usb_device_id products[] = { { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500), .driver_info = (unsigned long) &smsc75xx_info, }, { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505), .driver_info = (unsigned long) &smsc75xx_info, }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver smsc75xx_driver = { .name = SMSC_CHIPNAME, .id_table = products, .probe = usbnet_probe, .suspend = smsc75xx_suspend, .resume = smsc75xx_resume, .reset_resume = smsc75xx_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, .supports_autosuspend = 1, }; module_usb_driver(smsc75xx_driver); 
MODULE_AUTHOR("Nancy Lin"); MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>"); MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices"); MODULE_LICENSE("GPL"); |
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Driver for 8250/16550-type serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2001 Russell King.
*/ #include <linux/bits.h> #include <linux/serial_8250.h> #include <linux/serial_core.h> #include <linux/dmaengine.h> #include "../serial_mctrl_gpio.h" struct uart_8250_dma { int (*tx_dma)(struct uart_8250_port *p); int (*rx_dma)(struct uart_8250_port *p); void (*prepare_tx_dma)(struct uart_8250_port *p); void (*prepare_rx_dma)(struct uart_8250_port *p); /* Filter function */ dma_filter_fn fn; /* Parameter to the filter function */ void *rx_param; void *tx_param; struct dma_slave_config rxconf; struct dma_slave_config txconf; struct dma_chan *rxchan; struct dma_chan *txchan; /* Device address base for DMA operations */ phys_addr_t rx_dma_addr; phys_addr_t tx_dma_addr; /* DMA address of the buffer in memory */ dma_addr_t rx_addr; dma_addr_t tx_addr; dma_cookie_t rx_cookie; dma_cookie_t tx_cookie; void *rx_buf; size_t rx_size; size_t tx_size; unsigned char tx_running; unsigned char tx_err; unsigned char rx_running; }; struct old_serial_port { unsigned int uart; unsigned int baud_base; unsigned int port; unsigned int irq; upf_t flags; unsigned char io_type; unsigned char __iomem *iomem_base; unsigned short iomem_reg_shift; }; struct serial8250_config { const char *name; unsigned short fifo_size; unsigned short tx_loadsz; unsigned char fcr; unsigned char rxtrig_bytes[UART_FCR_R_TRIG_MAX_STATE]; unsigned int flags; }; #define UART_CAP_FIFO BIT(8) /* UART has FIFO */ #define UART_CAP_EFR BIT(9) /* UART has EFR */ #define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */ #define UART_CAP_AFE BIT(11) /* MCR-based hw flow control */ #define UART_CAP_UUE BIT(12) /* UART needs IER bit 6 set (Xscale) */ #define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */ #define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */ #define UART_CAP_RPM BIT(15) /* Runtime PM is active while idle */ #define UART_CAP_IRDA BIT(16) /* UART supports IrDA line discipline */ #define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks: * STOP PARITY EPAR SPAR WLEN5 WLEN6 */ #define UART_CAP_NOTEMT BIT(18) /* UART without interrupt on TEMT available */ #define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */ #define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */ #define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */ #define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */ #define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */ /* Module parameters */ #define UART_NR CONFIG_SERIAL_8250_NR_UARTS extern unsigned int nr_uarts; #define SERIAL8250_PORT_FLAGS(_base, _irq, _flags) \ { \ .iobase = _base, \ .irq = _irq, \ .uartclk = 1843200, \ .iotype = UPIO_PORT, \ .flags = UPF_BOOT_AUTOCONF | (_flags), \ } #define SERIAL8250_PORT(_base, _irq) SERIAL8250_PORT_FLAGS(_base, _irq, 0) extern struct uart_driver serial8250_reg; void serial8250_register_ports(struct uart_driver *drv, struct device *dev); /* Legacy ISA bus related APIs */ typedef void (*serial8250_isa_config_fn)(int, struct uart_port *, u32 *); extern serial8250_isa_config_fn serial8250_isa_config; void serial8250_isa_init_ports(void); extern struct platform_device *serial8250_isa_devs; extern const struct uart_ops *univ8250_port_base_ops; extern struct uart_ops univ8250_port_ops; static inline int serial_in(struct uart_8250_port *up, int offset) { return up->port.serial_in(&up->port, offset); } static inline void serial_out(struct uart_8250_port *up, int offset, int value) { up->port.serial_out(&up->port, offset, value); } /** * serial_lsr_in - Read LSR register and preserve flags across reads 
* @up: uart 8250 port * * Read LSR register and handle saving non-preserved flags across reads. * The flags that are not preserved across reads are stored into * up->lsr_saved_flags. * * Returns LSR value or'ed with the preserved flags (if any). */ static inline u16 serial_lsr_in(struct uart_8250_port *up) { u16 lsr = up->lsr_saved_flags; lsr |= serial_in(up, UART_LSR); up->lsr_saved_flags = lsr & up->lsr_save_mask; return lsr; } /* * For the 16C950 */ static void serial_icr_write(struct uart_8250_port *up, int offset, int value) { serial_out(up, UART_SCR, offset); serial_out(up, UART_ICR, value); } static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up, int offset) { unsigned int value; serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); serial_out(up, UART_SCR, offset); value = serial_in(up, UART_ICR); serial_icr_write(up, UART_ACR, up->acr); return value; } void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p); void serial8250_rpm_get(struct uart_8250_port *p); void serial8250_rpm_put(struct uart_8250_port *p); DEFINE_GUARD(serial8250_rpm, struct uart_8250_port *, serial8250_rpm_get(_T), serial8250_rpm_put(_T)); static inline u32 serial_dl_read(struct uart_8250_port *up) { return up->dl_read(up); } static inline void serial_dl_write(struct uart_8250_port *up, u32 value) { up->dl_write(up, value); } static inline bool serial8250_set_THRI(struct uart_8250_port *up) { /* Port locked to synchronize UART_IER access against the console. */ lockdep_assert_held_once(&up->port.lock); if (up->ier & UART_IER_THRI) return false; up->ier |= UART_IER_THRI; serial_out(up, UART_IER, up->ier); return true; } static inline bool serial8250_clear_THRI(struct uart_8250_port *up) { /* Port locked to synchronize UART_IER access against the console. 
*/ lockdep_assert_held_once(&up->port.lock); if (!(up->ier & UART_IER_THRI)) return false; up->ier &= ~UART_IER_THRI; serial_out(up, UART_IER, up->ier); return true; } struct uart_8250_port *serial8250_setup_port(int index); struct uart_8250_port *serial8250_get_port(int line); int serial8250_em485_config(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485); void serial8250_em485_start_tx(struct uart_8250_port *p, bool toggle_ier); void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier); void serial8250_em485_destroy(struct uart_8250_port *p); extern struct serial_rs485 serial8250_em485_supported; /* MCR <-> TIOCM conversion */ static inline int serial8250_TIOCM_to_MCR(int tiocm) { int mcr = 0; if (tiocm & TIOCM_RTS) mcr |= UART_MCR_RTS; if (tiocm & TIOCM_DTR) mcr |= UART_MCR_DTR; if (tiocm & TIOCM_OUT1) mcr |= UART_MCR_OUT1; if (tiocm & TIOCM_OUT2) mcr |= UART_MCR_OUT2; if (tiocm & TIOCM_LOOP) mcr |= UART_MCR_LOOP; return mcr; } static inline int serial8250_MCR_to_TIOCM(int mcr) { int tiocm = 0; if (mcr & UART_MCR_RTS) tiocm |= TIOCM_RTS; if (mcr & UART_MCR_DTR) tiocm |= TIOCM_DTR; if (mcr & UART_MCR_OUT1) tiocm |= TIOCM_OUT1; if (mcr & UART_MCR_OUT2) tiocm |= TIOCM_OUT2; if (mcr & UART_MCR_LOOP) tiocm |= TIOCM_LOOP; return tiocm; } /* MSR <-> TIOCM conversion */ static inline int serial8250_MSR_to_TIOCM(int msr) { int tiocm = 0; if (msr & UART_MSR_DCD) tiocm |= TIOCM_CAR; if (msr & UART_MSR_RI) tiocm |= TIOCM_RNG; if (msr & UART_MSR_DSR) tiocm |= TIOCM_DSR; if (msr & UART_MSR_CTS) tiocm |= TIOCM_CTS; return tiocm; } static inline void serial8250_out_MCR(struct uart_8250_port *up, int value) { serial_out(up, UART_MCR, value); if (up->gpios) mctrl_gpio_set(up->gpios, serial8250_MCR_to_TIOCM(value)); } static inline int serial8250_in_MCR(struct uart_8250_port *up) { int mctrl; mctrl = serial_in(up, UART_MCR); if (up->gpios) { unsigned int mctrl_gpio = 0; mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio); mctrl |= serial8250_TIOCM_to_MCR(mctrl_gpio); } return mctrl; } #ifdef CONFIG_SERIAL_8250_PNP int serial8250_pnp_init(void); void serial8250_pnp_exit(void); #else static inline int serial8250_pnp_init(void) { return 0; } static inline void serial8250_pnp_exit(void) { } #endif #ifdef CONFIG_SERIAL_8250_RSA void univ8250_rsa_support(struct uart_ops *ops, const struct uart_ops *core_ops); void rsa_enable(struct uart_8250_port *up); void rsa_disable(struct uart_8250_port *up); void rsa_autoconfig(struct uart_8250_port *up); void rsa_reset(struct uart_8250_port *up); #else static inline void univ8250_rsa_support(struct uart_ops *ops, const struct uart_ops *core_ops) { } static inline void rsa_enable(struct uart_8250_port *up) {} static inline void rsa_disable(struct uart_8250_port *up) {} static inline void rsa_autoconfig(struct uart_8250_port *up) {} static inline void rsa_reset(struct uart_8250_port *up) {} #endif #ifdef CONFIG_SERIAL_8250_FINTEK int fintek_8250_probe(struct uart_8250_port *uart); #else static inline int fintek_8250_probe(struct uart_8250_port *uart) { return 0; } #endif #ifdef CONFIG_ARCH_OMAP1 #include <linux/soc/ti/omap1-soc.h> static inline int is_omap1_8250(struct uart_8250_port *pt) { int res; switch (pt->port.mapbase) { case OMAP1_UART1_BASE: case OMAP1_UART2_BASE: case OMAP1_UART3_BASE: res = 1; break; default: res = 0; break; } return res; } static inline int is_omap1510_8250(struct uart_8250_port *pt) { if (!cpu_is_omap1510()) return 0; return is_omap1_8250(pt); } #else static inline int is_omap1_8250(struct 
uart_8250_port *pt) { return 0; } static inline int is_omap1510_8250(struct uart_8250_port *pt) { return 0; } #endif #ifdef CONFIG_SERIAL_8250_DMA extern int serial8250_tx_dma(struct uart_8250_port *); extern void serial8250_tx_dma_flush(struct uart_8250_port *); extern int serial8250_rx_dma(struct uart_8250_port *); extern void serial8250_rx_dma_flush(struct uart_8250_port *); extern int serial8250_request_dma(struct uart_8250_port *); extern void serial8250_release_dma(struct uart_8250_port *); static inline void serial8250_do_prepare_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; if (dma->prepare_tx_dma) dma->prepare_tx_dma(p); } static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; if (dma->prepare_rx_dma) dma->prepare_rx_dma(p); } static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; return dma && dma->tx_running; } #else static inline int serial8250_tx_dma(struct uart_8250_port *p) { return -1; } static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { } static inline int serial8250_rx_dma(struct uart_8250_port *p) { return -1; } static inline void serial8250_rx_dma_flush(struct uart_8250_port *p) { } static inline int serial8250_request_dma(struct uart_8250_port *p) { return -1; } static inline void serial8250_release_dma(struct uart_8250_port *p) { } static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) { return false; } #endif static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) { unsigned char status; status = serial_in(up, 0x04); /* EXCR2 */ #define PRESL(x) ((x) & 0x30) if (PRESL(status) == 0x10) { /* already in high speed mode */ return 0; } else { status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ serial_out(up, 0x04, status); } return 1; } static inline int serial_index(struct uart_port *port) { return port->minor - 64; } |
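serial8250_set_THRI() and serial8250_clear_THRI() above only touch UART_IER under the port lock, and enforce that with lockdep_assert_held_once(). A minimal sketch of a caller honouring that contract is shown below; example_kick_tx() is a hypothetical helper written for illustration, not part of the 8250 core.

static void example_kick_tx(struct uart_8250_port *up)
{
	unsigned long flags;

	spin_lock_irqsave(&up->port.lock, flags);
	/* Returns true only if THRI was newly enabled; the redundant
	 * UART_IER write is skipped when the bit was already set. */
	serial8250_set_THRI(up);
	spin_unlock_irqrestore(&up->port.lock, flags);
}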
/* SPDX-License-Identifier: GPL-2.0-only */ /* * IEEE802.15.4-2003 specification * * Copyright (C) 2007-2012 Siemens AG */ #ifndef NET_MAC802154_H #define NET_MAC802154_H #include <linux/unaligned.h> #include <net/af_ieee802154.h> #include <linux/ieee802154.h> #include <linux/skbuff.h> #include <net/cfg802154.h> /** * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags * * The following flags are used to indicate changed address settings from * the stack to the hardware. * * @IEEE802154_AFILT_SADDR_CHANGED: Indicates that the short address will be * changed. * * @IEEE802154_AFILT_IEEEADDR_CHANGED: Indicates that the extended address * will be changed. * * @IEEE802154_AFILT_PANID_CHANGED: Indicates that the pan id will be changed. * * @IEEE802154_AFILT_PANC_CHANGED: Indicates that the address filter will * do frame address filtering as a pan coordinator. */ enum ieee802154_hw_addr_filt_flags { IEEE802154_AFILT_SADDR_CHANGED = BIT(0), IEEE802154_AFILT_IEEEADDR_CHANGED = BIT(1), IEEE802154_AFILT_PANID_CHANGED = BIT(2), IEEE802154_AFILT_PANC_CHANGED = BIT(3), }; /** * struct ieee802154_hw_addr_filt - hardware address filtering settings * * @pan_id: pan_id which should be set to the hardware address filter. * * @short_addr: short_addr which should be set to the hardware address filter. * * @ieee_addr: extended address which should be set to the hardware address * filter. * * @pan_coord: boolean indicating whether hardware filtering should operate as a pan coordinator.
*/ struct ieee802154_hw_addr_filt { __le16 pan_id; __le16 short_addr; __le64 ieee_addr; bool pan_coord; }; /** * struct ieee802154_hw - ieee802154 hardware * * @extra_tx_headroom: headroom to reserve in each transmit skb for use by the * driver (e.g. for transmit headers.) * * @flags: hardware flags, see &enum ieee802154_hw_flags * * @parent: parent device of the hardware. * * @priv: pointer to private area that was allocated for driver use along with * this structure. * * @phy: This points to the &struct wpan_phy allocated for this 802.15.4 PHY. */ struct ieee802154_hw { /* filled by the driver */ int extra_tx_headroom; u32 flags; struct device *parent; void *priv; /* filled by mac802154 core */ struct wpan_phy *phy; }; /** * enum ieee802154_hw_flags - hardware flags * * These flags are used to indicate hardware capabilities to * the stack. Generally, flags here should have their meaning * done in a way that the simplest hardware doesn't need setting * any particular flags. There are some exceptions to this rule, * however, so you are advised to review these flags carefully. * * @IEEE802154_HW_TX_OMIT_CKSUM: Indicates that xmitter will add FCS on it's * own. * * @IEEE802154_HW_LBT: Indicates that transceiver will support listen before * transmit. * * @IEEE802154_HW_CSMA_PARAMS: Indicates that transceiver will support csma * parameters (max_be, min_be, backoff exponents). * * @IEEE802154_HW_FRAME_RETRIES: Indicates that transceiver will support ARET * frame retries setting. * * @IEEE802154_HW_AFILT: Indicates that transceiver will support hardware * address filter setting. * * @IEEE802154_HW_PROMISCUOUS: Indicates that transceiver will support * promiscuous mode setting. * * @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that receiver omits FCS. */ enum ieee802154_hw_flags { IEEE802154_HW_TX_OMIT_CKSUM = BIT(0), IEEE802154_HW_LBT = BIT(1), IEEE802154_HW_CSMA_PARAMS = BIT(2), IEEE802154_HW_FRAME_RETRIES = BIT(3), IEEE802154_HW_AFILT = BIT(4), IEEE802154_HW_PROMISCUOUS = BIT(5), IEEE802154_HW_RX_OMIT_CKSUM = BIT(6), }; /* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ #define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ IEEE802154_HW_RX_OMIT_CKSUM) /* struct ieee802154_ops - callbacks from mac802154 to the driver * * This structure contains various callbacks that the driver may * handle or, in some cases, must handle, for example to transmit * a frame. * * start: Handler that 802.15.4 module calls for device initialization. * This function is called before the first interface is attached. * * stop: Handler that 802.15.4 module calls for device cleanup. * This function is called after the last interface is removed. * * xmit_sync: * Handler that 802.15.4 module calls for each transmitted frame. * skb contains the buffer starting from the IEEE 802.15.4 header. * The low-level driver should send the frame based on available * configuration. This is called by a workqueue and useful for * synchronous 802.15.4 drivers. * This function should return zero or negative errno. * * WARNING: * This will be deprecated soon. We don't accept synced xmit callbacks * drivers anymore. * * xmit_async: * Handler that 802.15.4 module calls for each transmitted frame. * skb contains the buffer starting from the IEEE 802.15.4 header. * The low-level driver should send the frame based on available * configuration. * This function should return zero or negative errno. * * ed: Handler that 802.15.4 module calls for Energy Detection. 
* This function should place the value for detected energy * (usually device-dependant) in the level pointer and return * either zero or negative errno. Called with pib_lock held. * * set_channel: * Set radio for listening on specific channel. * Set the device for listening on specified channel. * Returns either zero, or negative errno. Called with pib_lock held. * * set_hw_addr_filt: * Set radio for listening on specific address. * Set the device for listening on specified address. * Returns either zero, or negative errno. * * set_txpower: * Set radio transmit power in mBm. Called with pib_lock held. * Returns either zero, or negative errno. * * set_lbt * Enables or disables listen before talk on the device. Called with * pib_lock held. * Returns either zero, or negative errno. * * set_cca_mode * Sets the CCA mode used by the device. Called with pib_lock held. * Returns either zero, or negative errno. * * set_cca_ed_level * Sets the CCA energy detection threshold in mBm. Called with pib_lock * held. * Returns either zero, or negative errno. * * set_csma_params * Sets the CSMA parameter set for the PHY. Called with pib_lock held. * Returns either zero, or negative errno. * * set_frame_retries * Sets the retransmission attempt limit. Called with pib_lock held. * Returns either zero, or negative errno. * * set_promiscuous_mode * Enables or disable promiscuous mode. */ struct ieee802154_ops { struct module *owner; int (*start)(struct ieee802154_hw *hw); void (*stop)(struct ieee802154_hw *hw); int (*xmit_sync)(struct ieee802154_hw *hw, struct sk_buff *skb); int (*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb); int (*ed)(struct ieee802154_hw *hw, u8 *level); int (*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel); int (*set_hw_addr_filt)(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed); int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm); int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm); int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); int (*set_frame_retries)(struct ieee802154_hw *hw, s8 retries); int (*set_promiscuous_mode)(struct ieee802154_hw *hw, const bool on); }; /** * ieee802154_get_fc_from_skb - get the frame control field from an skb * @skb: skb where the frame control field will be get from */ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) { __le16 fc; /* check if we can fc at skb_mac_header of sk buffer */ if (WARN_ON(!skb_mac_header_was_set(skb) || (skb_tail_pointer(skb) - skb_mac_header(skb)) < IEEE802154_FC_LEN)) return cpu_to_le16(0); memcpy(&fc, skb_mac_header(skb), IEEE802154_FC_LEN); return fc; } /** * ieee802154_skb_dst_pan - get the pointer to destination pan field * @fc: mac header frame control field * @skb: skb where the destination pan pointer will be get from */ static inline unsigned char *ieee802154_skb_dst_pan(__le16 fc, const struct sk_buff *skb) { unsigned char *dst_pan; switch (ieee802154_daddr_mode(fc)) { case cpu_to_le16(IEEE802154_FCTL_ADDR_NONE): dst_pan = NULL; break; case cpu_to_le16(IEEE802154_FCTL_DADDR_SHORT): case cpu_to_le16(IEEE802154_FCTL_DADDR_EXTENDED): dst_pan = skb_mac_header(skb) + IEEE802154_FC_LEN + IEEE802154_SEQ_LEN; break; default: WARN_ONCE(1, "invalid addr mode detected"); dst_pan = NULL; break; } return dst_pan; } /** * ieee802154_skb_src_pan - get the pointer to source pan 
field * @fc: mac header frame control field * @skb: skb where the source pan pointer will be get from */ static inline unsigned char *ieee802154_skb_src_pan(__le16 fc, const struct sk_buff *skb) { unsigned char *src_pan; switch (ieee802154_saddr_mode(fc)) { case cpu_to_le16(IEEE802154_FCTL_ADDR_NONE): src_pan = NULL; break; case cpu_to_le16(IEEE802154_FCTL_SADDR_SHORT): case cpu_to_le16(IEEE802154_FCTL_SADDR_EXTENDED): /* if intra-pan and source addr mode is non none, * then source pan id is equal destination pan id. */ if (ieee802154_is_intra_pan(fc)) { src_pan = ieee802154_skb_dst_pan(fc, skb); break; } switch (ieee802154_daddr_mode(fc)) { case cpu_to_le16(IEEE802154_FCTL_ADDR_NONE): src_pan = skb_mac_header(skb) + IEEE802154_FC_LEN + IEEE802154_SEQ_LEN; break; case cpu_to_le16(IEEE802154_FCTL_DADDR_SHORT): src_pan = skb_mac_header(skb) + IEEE802154_FC_LEN + IEEE802154_SEQ_LEN + IEEE802154_PAN_ID_LEN + IEEE802154_SHORT_ADDR_LEN; break; case cpu_to_le16(IEEE802154_FCTL_DADDR_EXTENDED): src_pan = skb_mac_header(skb) + IEEE802154_FC_LEN + IEEE802154_SEQ_LEN + IEEE802154_PAN_ID_LEN + IEEE802154_EXTENDED_ADDR_LEN; break; default: WARN_ONCE(1, "invalid addr mode detected"); src_pan = NULL; break; } break; default: WARN_ONCE(1, "invalid addr mode detected"); src_pan = NULL; break; } return src_pan; } /** * ieee802154_skb_is_intra_pan_addressing - checks whenever the mac addressing * is an intra pan communication * @fc: mac header frame control field * @skb: skb where the source and destination pan should be get from */ static inline bool ieee802154_skb_is_intra_pan_addressing(__le16 fc, const struct sk_buff *skb) { unsigned char *dst_pan = ieee802154_skb_dst_pan(fc, skb), *src_pan = ieee802154_skb_src_pan(fc, skb); /* if one is NULL is no intra pan addressing */ if (!dst_pan || !src_pan) return false; return !memcmp(dst_pan, src_pan, IEEE802154_PAN_ID_LEN); } /** * ieee802154_be64_to_le64 - copies and convert be64 to le64 * @le64_dst: le64 destination pointer * @be64_src: be64 source pointer */ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) { put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst); } /** * ieee802154_le64_to_be64 - copies and convert le64 to be64 * @be64_dst: be64 destination pointer * @le64_src: le64 source pointer */ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) { put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst); } /** * ieee802154_le16_to_be16 - copies and convert le16 to be16 * @be16_dst: be16 destination pointer * @le16_src: le16 source pointer */ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src) { put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst); } /** * ieee802154_be16_to_le16 - copies and convert be16 to le16 * @le16_dst: le16 destination pointer * @be16_src: be16 source pointer */ static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src) { put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst); } /** * ieee802154_alloc_hw - Allocate a new hardware device * * This must be called once for each hardware device. The returned pointer * must be used to refer to this device when calling other functions. * mac802154 allocates a private data area for the driver pointed to by * @priv in &struct ieee802154_hw, the size of this area is given as * @priv_data_len. * * @priv_data_len: length of private data * @ops: callbacks for this device * * Return: A pointer to the new hardware device, or %NULL on error. 
*/ struct ieee802154_hw * ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); /** * ieee802154_free_hw - free hardware descriptor * * This function frees everything that was allocated, including the * private data for the driver. You must call ieee802154_unregister_hw() * before calling this function. * * @hw: the hardware to free */ void ieee802154_free_hw(struct ieee802154_hw *hw); /** * ieee802154_register_hw - Register hardware device * * You must call this function before any other functions in * mac802154. Note that before a hardware can be registered, you * need to fill the contained wpan_phy's information. * * @hw: the device to register as returned by ieee802154_alloc_hw() * * Return: 0 on success. An error code otherwise. */ int ieee802154_register_hw(struct ieee802154_hw *hw); /** * ieee802154_unregister_hw - Unregister a hardware device * * This function instructs mac802154 to free allocated resources * and unregister netdevices from the networking subsystem. * * @hw: the hardware to unregister */ void ieee802154_unregister_hw(struct ieee802154_hw *hw); /** * ieee802154_rx_irqsafe - receive frame * * Like ieee802154_rx() but can be called in IRQ context * (internally defers to a tasklet.) * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac802154 after this call * @lqi: link quality indicator */ void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi); /** * ieee802154_xmit_complete - frame transmission complete * * @hw: pointer as obtained from ieee802154_alloc_hw(). * @skb: buffer for transmission * @ifs_handling: indicate interframe space handling */ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, bool ifs_handling); /** * ieee802154_xmit_error - offloaded frame transmission failed * * @hw: pointer as obtained from ieee802154_alloc_hw(). * @skb: buffer for transmission * @reason: error code */ void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb, int reason); /** * ieee802154_xmit_hw_error - frame could not be offloaded to the transmitter * because of a hardware error (bus error, timeout, etc) * * @hw: pointer as obtained from ieee802154_alloc_hw(). * @skb: buffer for transmission */ void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb); #endif /* NET_MAC802154_H */ |
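The kernel-doc above describes the whole driver-side flow: allocate a device with ieee802154_alloc_hw(), fill in the ieee802154_hw fields and flags, register it with ieee802154_register_hw(), and hand received frames back via ieee802154_rx_irqsafe(). A minimal sketch of a driver skeleton built on exactly those calls follows; every foo_* name and the registration glue are hypothetical, and only the ops signatures and helpers documented in this header are assumed.

#include <linux/module.h>
#include <net/mac802154.h>

struct foo_priv {
	struct ieee802154_hw *hw;
};

static int foo_start(struct ieee802154_hw *hw) { return 0; }
static void foo_stop(struct ieee802154_hw *hw) { }

static int foo_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* a real driver hands the frame to the radio here; this null
	 * radio just reports immediate completion */
	ieee802154_xmit_complete(hw, skb, false);
	return 0;
}

static int foo_ed(struct ieee802154_hw *hw, u8 *level)
{
	*level = 0;	/* report a dummy detected-energy level */
	return 0;
}

static int foo_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	return 0;	/* program the radio for the given page/channel here */
}

static const struct ieee802154_ops foo_ops = {
	.owner		= THIS_MODULE,
	.start		= foo_start,
	.stop		= foo_stop,
	.xmit_async	= foo_xmit_async,
	.ed		= foo_ed,
	.set_channel	= foo_set_channel,
};

static int foo_register(struct device *parent)
{
	struct ieee802154_hw *hw;
	struct foo_priv *priv;
	int ret;

	hw = ieee802154_alloc_hw(sizeof(*priv), &foo_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;
	priv->hw = hw;
	hw->parent = parent;
	hw->flags = IEEE802154_HW_OMIT_CKSUM;

	ret = ieee802154_register_hw(hw);
	if (ret)
		ieee802154_free_hw(hw);
	return ret;
}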
// SPDX-License-Identifier: GPL-2.0 /* * Implement the default iomap interfaces * * (C) Copyright 2004 Linus Torvalds */ #include <linux/pci.h> #include <linux/io.h> #include <linux/kmsan-checks.h> #include <linux/export.h> /* * Read/write from/to an (offsettable) iomem cookie. It might be a PIO * access or a MMIO access, these functions don't care. The info is * encoded in the hardware mapping set up by the mapping functions * (or the cookie itself, depending on implementation and hw). * * The generic routines don't assume any hardware mappings, and just * encode the PIO/MMIO as part of the cookie. They coldly assume that * the MMIO IO mappings are not in the low address range. * * Architectures for which this is not true can't use this generic * implementation and should do their own copy. */ #ifndef HAVE_ARCH_PIO_SIZE /* * We encode the physical PIO addresses (0-0xffff) into the * pointer by offsetting them with a constant (0x10000) and * assuming that all the low addresses are always PIO. That means * we can do some sanity checks on the low bits, and don't * need to just take things for granted. */ #define PIO_OFFSET 0x10000UL #define PIO_MASK 0x0ffffUL #define PIO_RESERVED 0x40000UL #endif static void bad_io_access(unsigned long port, const char *access) { static int count = 10; if (count) { count--; WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); } } /* * Ugly macros are a way of life.
*/ #define IO_COND(addr, is_pio, is_mmio) do { \ unsigned long port = (unsigned long __force)addr; \ if (port >= PIO_RESERVED) { \ is_mmio; \ } else if (port > PIO_OFFSET) { \ port &= PIO_MASK; \ is_pio; \ } else \ bad_io_access(port, #is_pio ); \ } while (0) #ifndef pio_read16be #define pio_read16be(port) swab16(inw(port)) #define pio_read32be(port) swab32(inl(port)) #endif #ifndef mmio_read16be #define mmio_read16be(addr) swab16(readw(addr)) #define mmio_read32be(addr) swab32(readl(addr)) #define mmio_read64be(addr) swab64(readq(addr)) #endif /* * Here and below, we apply __no_kmsan_checks to functions reading data from * hardware, to ensure that KMSAN marks their return values as initialized. */ __no_kmsan_checks unsigned int ioread8(const void __iomem *addr) { IO_COND(addr, return inb(port), return readb(addr)); return 0xff; } __no_kmsan_checks unsigned int ioread16(const void __iomem *addr) { IO_COND(addr, return inw(port), return readw(addr)); return 0xffff; } __no_kmsan_checks unsigned int ioread16be(const void __iomem *addr) { IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr)); return 0xffff; } __no_kmsan_checks unsigned int ioread32(const void __iomem *addr) { IO_COND(addr, return inl(port), return readl(addr)); return 0xffffffff; } __no_kmsan_checks unsigned int ioread32be(const void __iomem *addr) { IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr)); return 0xffffffff; } EXPORT_SYMBOL(ioread8); EXPORT_SYMBOL(ioread16); EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); #ifdef CONFIG_64BIT static u64 pio_read64_lo_hi(unsigned long port) { u64 lo, hi; lo = inl(port); hi = inl(port + sizeof(u32)); return lo | (hi << 32); } static u64 pio_read64_hi_lo(unsigned long port) { u64 lo, hi; hi = inl(port + sizeof(u32)); lo = inl(port); return lo | (hi << 32); } static u64 pio_read64be_lo_hi(unsigned long port) { u64 lo, hi; lo = pio_read32be(port + sizeof(u32)); hi = pio_read32be(port); return lo | (hi << 32); } static u64 pio_read64be_hi_lo(unsigned long port) { u64 lo, hi; hi = pio_read32be(port); lo = pio_read32be(port + sizeof(u32)); return lo | (hi << 32); } __no_kmsan_checks u64 __ioread64_lo_hi(const void __iomem *addr) { IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr)); return 0xffffffffffffffffULL; } __no_kmsan_checks u64 __ioread64_hi_lo(const void __iomem *addr) { IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr)); return 0xffffffffffffffffULL; } __no_kmsan_checks u64 __ioread64be_lo_hi(const void __iomem *addr) { IO_COND(addr, return pio_read64be_lo_hi(port), return mmio_read64be(addr)); return 0xffffffffffffffffULL; } __no_kmsan_checks u64 __ioread64be_hi_lo(const void __iomem *addr) { IO_COND(addr, return pio_read64be_hi_lo(port), return mmio_read64be(addr)); return 0xffffffffffffffffULL; } EXPORT_SYMBOL(__ioread64_lo_hi); EXPORT_SYMBOL(__ioread64_hi_lo); EXPORT_SYMBOL(__ioread64be_lo_hi); EXPORT_SYMBOL(__ioread64be_hi_lo); #endif /* CONFIG_64BIT */ #ifndef pio_write16be #define pio_write16be(val,port) outw(swab16(val),port) #define pio_write32be(val,port) outl(swab32(val),port) #endif #ifndef mmio_write16be #define mmio_write16be(val,port) writew(swab16(val),port) #define mmio_write32be(val,port) writel(swab32(val),port) #define mmio_write64be(val,port) writeq(swab64(val),port) #endif void iowrite8(u8 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. 
*/ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, outb(val,port), writeb(val, addr)); } void iowrite16(u16 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, outw(val,port), writew(val, addr)); } void iowrite16be(u16 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr)); } void iowrite32(u32 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, outl(val,port), writel(val, addr)); } void iowrite32be(u32 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr)); } EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); EXPORT_SYMBOL(iowrite16be); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); #ifdef CONFIG_64BIT static void pio_write64_lo_hi(u64 val, unsigned long port) { outl(val, port); outl(val >> 32, port + sizeof(u32)); } static void pio_write64_hi_lo(u64 val, unsigned long port) { outl(val >> 32, port + sizeof(u32)); outl(val, port); } static void pio_write64be_lo_hi(u64 val, unsigned long port) { pio_write32be(val, port + sizeof(u32)); pio_write32be(val >> 32, port); } static void pio_write64be_hi_lo(u64 val, unsigned long port) { pio_write32be(val >> 32, port); pio_write32be(val, port + sizeof(u32)); } void __iowrite64_lo_hi(u64 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, pio_write64_lo_hi(val, port), writeq(val, addr)); } void __iowrite64_hi_lo(u64 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, pio_write64_hi_lo(val, port), writeq(val, addr)); } void __iowrite64be_lo_hi(u64 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, pio_write64be_lo_hi(val, port), mmio_write64be(val, addr)); } void __iowrite64be_hi_lo(u64 val, void __iomem *addr) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(&val, sizeof(val)); IO_COND(addr, pio_write64be_hi_lo(val, port), mmio_write64be(val, addr)); } EXPORT_SYMBOL(__iowrite64_lo_hi); EXPORT_SYMBOL(__iowrite64_hi_lo); EXPORT_SYMBOL(__iowrite64be_lo_hi); EXPORT_SYMBOL(__iowrite64be_hi_lo); #endif /* CONFIG_64BIT */ /* * These are the "repeat MMIO read/write" functions. * Note the "__raw" accesses, since we don't want to * convert to CPU byte order. We write in "IO byte * order" (we also don't have IO barriers). 
*/ #ifndef mmio_insb static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); *dst = data; dst++; } } static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); *dst = data; dst++; } } static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); *dst = data; dst++; } } #endif #ifndef mmio_outsb static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) { while (--count >= 0) { __raw_writeb(*src, addr); src++; } } static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) { while (--count >= 0) { __raw_writew(*src, addr); src++; } } static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) { while (--count >= 0) { __raw_writel(*src, addr); src++; } } #endif void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count)); /* KMSAN must treat values read from devices as initialized. */ kmsan_unpoison_memory(dst, count); } void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count)); /* KMSAN must treat values read from devices as initialized. */ kmsan_unpoison_memory(dst, count * 2); } void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count)); /* KMSAN must treat values read from devices as initialized. */ kmsan_unpoison_memory(dst, count * 4); } EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(ioread16_rep); EXPORT_SYMBOL(ioread32_rep); void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(src, count); IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count)); } void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(src, count * 2); IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count)); } void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) { /* Make sure uninitialized memory isn't copied to devices. */ kmsan_check_memory(src, count * 4); IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count)); } EXPORT_SYMBOL(iowrite8_rep); EXPORT_SYMBOL(iowrite16_rep); EXPORT_SYMBOL(iowrite32_rep); #ifdef CONFIG_HAS_IOPORT_MAP /* Create a virtual mapping cookie for an IO port range */ void __iomem *ioport_map(unsigned long port, unsigned int nr) { if (port > PIO_MASK) return NULL; return (void __iomem *) (unsigned long) (port + PIO_OFFSET); } void ioport_unmap(void __iomem *addr) { /* Nothing to do */ } EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); #endif /* CONFIG_HAS_IOPORT_MAP */ #ifdef CONFIG_PCI /* Hide the details if this is a MMIO or PIO address space and just do what * you expect in the correct way. */ void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { IO_COND(addr, /* nothing */, iounmap(addr)); } EXPORT_SYMBOL(pci_iounmap); #endif /* CONFIG_PCI */ |
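ioport_map() above simply offsets a legacy port number by PIO_OFFSET so that the resulting cookie falls below PIO_RESERVED, which is what steers IO_COND() down the inb()/outb() branch in every accessor. A minimal usage sketch follows; the 0x3f8/8 port range and the register offsets are illustrative values only, not taken from this file.

#include <linux/io.h>

static int example_poke_legacy_port(void)
{
	void __iomem *regs;
	u8 val;

	/* cookie = 0x3f8 + PIO_OFFSET, so ioread8()/iowrite8() below
	 * dispatch to inb()/outb() rather than readb()/writeb() */
	regs = ioport_map(0x3f8, 8);
	if (!regs)
		return -ENODEV;

	iowrite8(0x00, regs + 1);	/* write one device register */
	val = ioread8(regs + 2);	/* read another back */

	ioport_unmap(regs);
	return val;
}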
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext4 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/time.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/mount.h> #include <linux/path.h> #include <linux/dax.h> #include <linux/quotaops.h> #include <linux/pagevec.h> #include <linux/uio.h> #include <linux/mman.h> #include <linux/backing-dev.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "truncate.h" /* * Returns %true if the given DIO request should be attempted with DIO, or * %false if it should fall back to buffered I/O. * * DIO isn't well specified; when it's unsupported (either due to the request * being misaligned, or due to the file not supporting DIO at all), filesystems * either fall back to buffered I/O or return EINVAL. For files that don't use * any special features like encryption or verity, ext4 has traditionally * returned EINVAL for misaligned DIO. iomap_dio_rw() uses this convention too. * In this case, we should attempt the DIO, *not* fall back to buffered I/O. * * In contrast, in cases where DIO is unsupported due to ext4 features, ext4 * traditionally falls back to buffered I/O. * * This function implements the traditional ext4 behavior in all these cases. */ static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter) { struct inode *inode = file_inode(iocb->ki_filp); u32 dio_align = ext4_dio_alignment(inode); if (dio_align == 0) return false; if (dio_align == 1) return true; return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align); } static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to) { ssize_t ret; struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock_shared(inode)) return -EAGAIN; } else { inode_lock_shared(inode); } if (!ext4_should_use_dio(iocb, to)) { inode_unlock_shared(inode); /* * Fallback to buffered I/O if the operation being performed on * the inode is not supported by direct I/O. The IOCB_DIRECT * flag needs to be cleared here in order to ensure that the * direct I/O path within generic_file_read_iter() is not * taken.
*/ iocb->ki_flags &= ~IOCB_DIRECT; return generic_file_read_iter(iocb, to); } ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0); inode_unlock_shared(inode); file_accessed(iocb->ki_filp); return ret; } #ifdef CONFIG_FS_DAX static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock_shared(inode)) return -EAGAIN; } else { inode_lock_shared(inode); } /* * Recheck under inode lock - at this point we are sure it cannot * change anymore */ if (!IS_DAX(inode)) { inode_unlock_shared(inode); /* Fallback to buffered IO in case we cannot support DAX */ return generic_file_read_iter(iocb, to); } ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops); inode_unlock_shared(inode); file_accessed(iocb->ki_filp); return ret; } #endif static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; if (!iov_iter_count(to)) return 0; /* skip atime */ #ifdef CONFIG_FS_DAX if (IS_DAX(inode)) return ext4_dax_read_iter(iocb, to); #endif if (iocb->ki_flags & IOCB_DIRECT) return ext4_dio_read_iter(iocb, to); return generic_file_read_iter(iocb, to); } static ssize_t ext4_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; return filemap_splice_read(in, ppos, pipe, len, flags); } /* * Called when an inode is released. Note that this is different * from ext4_file_open: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext4_release_file(struct inode *inode, struct file *filp) { if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { ext4_alloc_da_blocks(inode); ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); } /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1) && !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) ext4_htree_free_dir_info(filp->private_data); return 0; } /* * This tests whether the IO in question is block-aligned or not. * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they * are converted to written only after the IO is complete. Until they are * mapped, these blocks appear as holes, so dio_zero_block() will assume that * it needs to zero out portions of the start and/or end block. If 2 AIO * threads are at work on the same unwritten block, they must be synchronized * or one thread will zero the other's data, causing corruption. */ static bool ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos) { struct super_block *sb = inode->i_sb; unsigned long blockmask = sb->s_blocksize - 1; if ((pos | iov_iter_alignment(from)) & blockmask) return true; return false; } static bool ext4_extending_io(struct inode *inode, loff_t offset, size_t len) { if (offset + len > i_size_read(inode) || offset + len > EXT4_I(inode)->i_disksize) return true; return false; } /* Is IO overwriting allocated or initialized blocks? 
*/ static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len, bool *unwritten) { struct ext4_map_blocks map; unsigned int blkbits = inode->i_blkbits; int err, blklen; if (pos + len > i_size_read(inode)) return false; map.m_lblk = pos >> blkbits; map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits); blklen = map.m_len; err = ext4_map_blocks(NULL, inode, &map, 0); if (err != blklen) return false; /* * 'err==len' means that all of the blocks have been preallocated, * regardless of whether they have been initialized or not. We need to * check m_flags to distinguish the unwritten extents. */ *unwritten = !(map.m_flags & EXT4_MAP_MAPPED); return true; } static ssize_t ext4_generic_write_checks(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; ret = generic_write_checks(iocb, from); if (ret <= 0) return ret; /* * If we have encountered a bitmap-format file, the size limit * is smaller than s_maxbytes, which is for extent-mapped files. */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) return -EFBIG; iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); } return iov_iter_count(from); } static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret, count; count = ext4_generic_write_checks(iocb, from); if (count <= 0) return count; ret = file_modified(iocb->ki_filp); if (ret) return ret; return count; } static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; inode_lock(inode); ret = ext4_write_checks(iocb, from); if (ret <= 0) goto out; ret = generic_perform_write(iocb, from); out: inode_unlock(inode); if (unlikely(ret <= 0)) return ret; return generic_write_sync(iocb, ret); } static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, ssize_t written, ssize_t count) { handle_t *handle; lockdep_assert_held_write(&inode->i_rwsem); handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) return PTR_ERR(handle); if (ext4_update_inode_size(inode, offset + written)) { int ret = ext4_mark_inode_dirty(handle, inode); if (unlikely(ret)) { ext4_journal_stop(handle); return ret; } } if ((written == count) && inode->i_nlink) ext4_orphan_del(handle, inode); ext4_journal_stop(handle); return written; } /* * Clean up the inode after DIO or DAX extending write has completed and the * inode size has been updated using ext4_handle_inode_extension(). */ static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc) { lockdep_assert_held_write(&inode->i_rwsem); if (need_trunc) { ext4_truncate_failed_write(inode); /* * If the truncate operation failed early, then the inode may * still be on the orphan list. In that case, we need to try * remove the inode from the in-memory linked list. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); return; } /* * If i_disksize got extended either due to writeback of delalloc * blocks or extending truncate while the DIO was running we could fail * to cleanup the orphan list in ext4_handle_inode_extension(). Do it * now. */ if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) { handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { /* * The write has successfully completed. 
Not much to * do with the error here so just cleanup the orphan * list and hope for the best. */ ext4_orphan_del(NULL, inode); return; } ext4_orphan_del(handle, inode); ext4_journal_stop(handle); } } static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error, unsigned int flags) { loff_t pos = iocb->ki_pos; struct inode *inode = file_inode(iocb->ki_filp); if (!error && size && (flags & IOMAP_DIO_UNWRITTEN) && (iocb->ki_flags & IOCB_ATOMIC)) error = ext4_convert_unwritten_extents_atomic(NULL, inode, pos, size); else if (!error && size && flags & IOMAP_DIO_UNWRITTEN) error = ext4_convert_unwritten_extents(NULL, inode, pos, size); if (error) return error; /* * Note that EXT4_I(inode)->i_disksize can get extended up to * inode->i_size while the I/O was running due to writeback of delalloc * blocks. But the code in ext4_iomap_alloc() is careful to use * zeroed/unwritten extents if this is possible; thus we won't leave * uninitialized blocks in a file even if we didn't succeed in writing * as much as we intended. Also we can race with truncate or write * expanding the file so we have to be a bit careful here. */ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) && pos + size <= i_size_read(inode)) return 0; error = ext4_handle_inode_extension(inode, pos, size, size); return error < 0 ? error : 0; } static const struct iomap_dio_ops ext4_dio_write_ops = { .end_io = ext4_dio_write_end_io, }; /* * The intention here is to start with shared lock acquired then see if any * condition requires an exclusive inode lock. If yes, then we restart the * whole operation by releasing the shared lock and acquiring exclusive lock. * * - For unaligned_io we never take shared lock as it may cause data corruption * when two unaligned IO tries to modify the same block e.g. while zeroing. * * - For extending writes case we don't take the shared lock, since it requires * updating inode i_disksize and/or orphan handling with exclusive lock. * * - shared locking will only be true mostly with overwrites, including * initialized blocks and unwritten blocks. For overwrite unwritten blocks * we protect splitting extents by i_data_sem in ext4_inode_info, so we can * also release exclusive i_rwsem lock. * * - Otherwise we will switch to exclusive i_rwsem lock. */ static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from, bool *ilock_shared, bool *extend, bool *unwritten, int *dio_flags) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); loff_t offset; size_t count; ssize_t ret; bool overwrite, unaligned_io; restart: ret = ext4_generic_write_checks(iocb, from); if (ret <= 0) goto out; offset = iocb->ki_pos; count = ret; unaligned_io = ext4_unaligned_io(inode, from, offset); *extend = ext4_extending_io(inode, offset, count); overwrite = ext4_overwrite_io(inode, offset, count, unwritten); /* * Determine whether we need to upgrade to an exclusive lock. This is * required to change security info in file_modified(), for extending * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten * extents (as partial block zeroing may be required). * * Note that unaligned writes are allowed under shared lock so long as * they are pure overwrites. Otherwise, concurrent unaligned writes risk * data corruption due to partial block zeroing in the dio layer, and so * the I/O must occur exclusively. 
*/ if (*ilock_shared && ((!IS_NOSEC(inode) || *extend || !overwrite || (unaligned_io && *unwritten)))) { if (iocb->ki_flags & IOCB_NOWAIT) { ret = -EAGAIN; goto out; } inode_unlock_shared(inode); *ilock_shared = false; inode_lock(inode); goto restart; } /* * Now that locking is settled, determine dio flags and exclusivity * requirements. We don't use DIO_OVERWRITE_ONLY because we enforce * behavior already. The inode lock is already held exclusive if the * write is non-overwrite or extending, so drain all outstanding dio and * set the force wait dio flag. */ if (!*ilock_shared && (unaligned_io || *extend)) { if (iocb->ki_flags & IOCB_NOWAIT) { ret = -EAGAIN; goto out; } if (unaligned_io && (!overwrite || *unwritten)) inode_dio_wait(inode); *dio_flags = IOMAP_DIO_FORCE_WAIT; } ret = file_modified(file); if (ret < 0) goto out; return count; out: if (*ilock_shared) inode_unlock_shared(inode); else inode_unlock(inode); return ret; } static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; handle_t *handle; struct inode *inode = file_inode(iocb->ki_filp); loff_t offset = iocb->ki_pos; size_t count = iov_iter_count(from); const struct iomap_ops *iomap_ops = &ext4_iomap_ops; bool extend = false, unwritten = false; bool ilock_shared = true; int dio_flags = 0; /* * Quick check here without any i_rwsem lock to see if it is extending * IO. A more reliable check is done in ext4_dio_write_checks() with * proper locking in place. */ if (offset + count > i_size_read(inode)) ilock_shared = false; if (iocb->ki_flags & IOCB_NOWAIT) { if (ilock_shared) { if (!inode_trylock_shared(inode)) return -EAGAIN; } else { if (!inode_trylock(inode)) return -EAGAIN; } } else { if (ilock_shared) inode_lock_shared(inode); else inode_lock(inode); } /* Fallback to buffered I/O if the inode does not support direct I/O. */ if (!ext4_should_use_dio(iocb, from)) { if (ilock_shared) inode_unlock_shared(inode); else inode_unlock(inode); return ext4_buffered_write_iter(iocb, from); } /* * Prevent inline data from being created since we are going to allocate * blocks for DIO. We know the inode does not currently have inline data * because ext4_should_use_dio() checked for it, but we have to clear * the state flag before the write checks because a lock cycle could * introduce races with other writers. */ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend, &unwritten, &dio_flags); if (ret <= 0) return ret; offset = iocb->ki_pos; count = ret; if (extend) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (ret) goto out; } if (ilock_shared && !unwritten) iomap_ops = &ext4_iomap_overwrite_ops; ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops, dio_flags, NULL, 0); if (ret == -ENOTBLK) ret = 0; if (extend) { /* * We always perform extending DIO write synchronously so by * now the IO is completed and ext4_handle_inode_extension() * was called. Cleanup the inode in case of error or race with * writeback of delalloc blocks. */ WARN_ON_ONCE(ret == -EIOCBQUEUED); ext4_inode_extension_cleanup(inode, ret < 0); } out: if (ilock_shared) inode_unlock_shared(inode); else inode_unlock(inode); if (ret >= 0 && iov_iter_count(from)) { ssize_t err; loff_t endbyte; /* * There is no support for atomic writes on buffered-io yet, * we should never fallback to buffered-io for DIO atomic * writes. 
*/ WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC); offset = iocb->ki_pos; err = ext4_buffered_write_iter(iocb, from); if (err < 0) return err; /* * We need to ensure that the pages within the page cache for * the range covered by this I/O are written to disk and * invalidated. This is in attempt to preserve the expected * direct I/O semantics in the case we fallback to buffered I/O * to complete off the I/O request. */ ret += err; endbyte = offset + err - 1; err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping, offset, endbyte); if (!err) invalidate_mapping_pages(iocb->ki_filp->f_mapping, offset >> PAGE_SHIFT, endbyte >> PAGE_SHIFT); } return ret; } #ifdef CONFIG_FS_DAX static ssize_t ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; size_t count; loff_t offset; handle_t *handle; bool extend = false; struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock(inode)) return -EAGAIN; } else { inode_lock(inode); } ret = ext4_write_checks(iocb, from); if (ret <= 0) goto out; offset = iocb->ki_pos; count = iov_iter_count(from); if (offset + count > EXT4_I(inode)->i_disksize) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_orphan_add(handle, inode); if (ret) { ext4_journal_stop(handle); goto out; } extend = true; ext4_journal_stop(handle); } ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops); if (extend) { ret = ext4_handle_inode_extension(inode, offset, ret, count); ext4_inode_extension_cleanup(inode, ret < (ssize_t)count); } out: inode_unlock(inode); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } #endif static ssize_t ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { int ret; struct inode *inode = file_inode(iocb->ki_filp); ret = ext4_emergency_state(inode->i_sb); if (unlikely(ret)) return ret; #ifdef CONFIG_FS_DAX if (IS_DAX(inode)) return ext4_dax_write_iter(iocb, from); #endif if (iocb->ki_flags & IOCB_ATOMIC) { size_t len = iov_iter_count(from); if (len < EXT4_SB(inode->i_sb)->s_awu_min || len > EXT4_SB(inode->i_sb)->s_awu_max) return -EINVAL; ret = generic_atomic_write_valid(iocb, from); if (ret) return ret; } if (iocb->ki_flags & IOCB_DIRECT) return ext4_dio_write_iter(iocb, from); else return ext4_buffered_write_iter(iocb, from); } #ifdef CONFIG_FS_DAX static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order) { int error = 0; vm_fault_t result; int retries = 0; handle_t *handle = NULL; struct inode *inode = file_inode(vmf->vma->vm_file); struct super_block *sb = inode->i_sb; /* * We have to distinguish real writes from writes which will result in a * COW page; COW writes should *not* poke the journal (the file will not * be changed). Doing so would cause unintended failures when mounted * read-only. * * We check for VM_SHARED rather than vmf->cow_page since the latter is * unset for order != 0 (i.e. only in do_cow_fault); for * other sizes, dax_iomap_fault will handle splitting / fallback so that * we eventually come back with a COW page. 
*/ bool write = (vmf->flags & FAULT_FLAG_WRITE) && (vmf->vma->vm_flags & VM_SHARED); struct address_space *mapping = vmf->vma->vm_file->f_mapping; unsigned long pfn; if (write) { sb_start_pagefault(sb); file_update_time(vmf->vma->vm_file); filemap_invalidate_lock_shared(mapping); retry: handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); if (IS_ERR(handle)) { filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(sb); return VM_FAULT_SIGBUS; } } else { filemap_invalidate_lock_shared(mapping); } result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops); if (write) { ext4_journal_stop(handle); if ((result & VM_FAULT_ERROR) && error == -ENOSPC && ext4_should_retry_alloc(sb, &retries)) goto retry; /* Handling synchronous page fault? */ if (result & VM_FAULT_NEEDDSYNC) result = dax_finish_sync_fault(vmf, order, pfn); filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(sb); } else { filemap_invalidate_unlock_shared(mapping); } return result; } static vm_fault_t ext4_dax_fault(struct vm_fault *vmf) { return ext4_dax_huge_fault(vmf, 0); } static const struct vm_operations_struct ext4_dax_vm_ops = { .fault = ext4_dax_fault, .huge_fault = ext4_dax_huge_fault, .page_mkwrite = ext4_dax_fault, .pfn_mkwrite = ext4_dax_fault, }; #else #define ext4_dax_vm_ops ext4_file_vm_ops #endif static const struct vm_operations_struct ext4_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = ext4_page_mkwrite, }; static int ext4_file_mmap_prepare(struct vm_area_desc *desc) { int ret; struct file *file = desc->file; struct inode *inode = file->f_mapping->host; struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev; if (file->f_mode & FMODE_WRITE) ret = ext4_emergency_state(inode->i_sb); else ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0; if (unlikely(ret)) return ret; /* * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous. */ if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev)) return -EOPNOTSUPP; file_accessed(file); if (IS_DAX(file_inode(file))) { desc->vm_ops = &ext4_dax_vm_ops; desc->vm_flags |= VM_HUGEPAGE; } else { desc->vm_ops = &ext4_file_vm_ops; } return 0; } static int ext4_sample_last_mounted(struct super_block *sb, struct vfsmount *mnt) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct path path; char buf[64], *cp; handle_t *handle; int err; if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED))) return 0; if (ext4_emergency_state(sb) || sb_rdonly(sb) || !sb_start_intwrite_trylock(sb)) return 0; ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED); /* * Sample where the filesystem has been mounted and * store it in the superblock for sysadmin convenience * when trying to sort through large numbers of block * devices or filesystem images. 
*/ memset(buf, 0, sizeof(buf)); path.mnt = mnt; path.dentry = mnt->mnt_root; cp = d_path(&path, buf, sizeof(buf)); err = 0; if (IS_ERR(cp)) goto out; handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1); err = PTR_ERR(handle); if (IS_ERR(handle)) goto out; BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, EXT4_JTR_NONE); if (err) goto out_journal; lock_buffer(sbi->s_sbh); strtomem_pad(sbi->s_es->s_last_mounted, cp, 0); ext4_superblock_csum_set(sb); unlock_buffer(sbi->s_sbh); ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); out_journal: ext4_journal_stop(handle); out: sb_end_intwrite(sb); return err; } static int ext4_file_open(struct inode *inode, struct file *filp) { int ret; if (filp->f_mode & FMODE_WRITE) ret = ext4_emergency_state(inode->i_sb); else ret = ext4_forced_shutdown(inode->i_sb) ? -EIO : 0; if (unlikely(ret)) return ret; ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt); if (ret) return ret; ret = fscrypt_file_open(inode, filp); if (ret) return ret; ret = fsverity_file_open(inode, filp); if (ret) return ret; /* * Set up the jbd2_inode if we are opening the inode for * writing and the journal is present */ if (filp->f_mode & FMODE_WRITE) { ret = ext4_inode_attach_jinode(inode); if (ret < 0) return ret; } if (ext4_inode_can_atomic_write(inode)) filp->f_mode |= FMODE_CAN_ATOMIC_WRITE; filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; return dquot_file_open(inode, filp); } /* * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values * by calling generic_file_llseek_size() with the appropriate maxbytes * value for each. */ loff_t ext4_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = ext4_get_maxbytes(inode); switch (whence) { default: return generic_file_llseek_size(file, offset, whence, maxbytes, i_size_read(inode)); case SEEK_HOLE: inode_lock_shared(inode); offset = iomap_seek_hole(inode, offset, &ext4_iomap_report_ops); inode_unlock_shared(inode); break; case SEEK_DATA: inode_lock_shared(inode); offset = iomap_seek_data(inode, offset, &ext4_iomap_report_ops); inode_unlock_shared(inode); break; } if (offset < 0) return offset; return vfs_setpos(file, offset, maxbytes); } const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, .read_iter = ext4_file_read_iter, .write_iter = ext4_file_write_iter, .iopoll = iocb_bio_iopoll, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, #endif .mmap_prepare = ext4_file_mmap_prepare, .open = ext4_file_open, .release = ext4_release_file, .fsync = ext4_sync_file, .get_unmapped_area = thp_get_unmapped_area, .splice_read = ext4_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ext4_fallocate, .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC | FOP_DIO_PARALLEL_WRITE | FOP_DONTCACHE, }; const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_file_getattr, .listxattr = ext4_listxattr, .get_inode_acl = ext4_get_acl, .set_acl = ext4_set_acl, .fiemap = ext4_fiemap, .fileattr_get = ext4_fileattr_get, .fileattr_set = ext4_fileattr_set, }; |
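/*
 * Illustrative userspace sketch (not part of fs/ext4/file.c): a direct I/O
 * write that satisfies the alignment rule checked by ext4_should_use_dio().
 * The path and the 4096-byte alignment are assumptions for the example; the
 * alignment ext4 actually requires is whatever ext4_dio_alignment() reports
 * for the inode (typically the filesystem block size).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int example_dio_write(void)
{
	void *buf;
	int fd, ret = 0;

	fd = open("/mnt/ext4/testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0)
		return -1;

	/* Both the buffer address and the file offset must be aligned. */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return -1;
	}
	memset(buf, 0xab, 4096);

	if (pwrite(fd, buf, 4096, 0) != 4096)
		ret = -1;

	free(buf);
	close(fd);
	return ret;
}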
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mdio

#if !defined(_TRACE_MDIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MDIO_H

#include <linux/tracepoint.h>

TRACE_EVENT_CONDITION(mdio_access,

	TP_PROTO(struct mii_bus *bus, char read,
		 u8 addr, unsigned regnum, u16 val, int err),

	TP_ARGS(bus, read, addr, regnum, val, err),

	TP_CONDITION(err >= 0),

	TP_STRUCT__entry(
		__array(char, busid, MII_BUS_ID_SIZE)
		__field(char, read)
		__field(u8, addr)
		__field(u16, val)
		__field(unsigned, regnum)
	),

	TP_fast_assign(
		strscpy(__entry->busid, bus->id, MII_BUS_ID_SIZE);
		__entry->read = read;
		__entry->addr = addr;
		__entry->regnum = regnum;
		__entry->val = val;
	),

	TP_printk("%s %-5s phy:0x%02hhx reg:0x%02x val:0x%04hx",
		  __entry->busid, __entry->read ? "read" : "write",
		  __entry->addr, __entry->regnum, __entry->val)
);

#endif /* if !defined(_TRACE_MDIO_H) || defined(TRACE_HEADER_MULTI_READ) */

/* This part must be outside protection */
#include <trace/define_trace.h>
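/*
 * Illustrative sketch (assumed caller, not part of this header): because the
 * event is declared with TP_CONDITION(err >= 0), an MDIO accessor can invoke
 * the tracepoint unconditionally and failed transfers are silently dropped.
 * The helper below is hypothetical; in the kernel the tracepoint is emitted
 * from the MDIO bus core.
 */
#include <linux/phy.h>
#include <trace/events/mdio.h>

static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	int ret = bus->read(bus, addr, regnum);

	/* Recorded only when ret >= 0 thanks to TP_CONDITION(err >= 0) */
	trace_mdio_access(bus, 1, addr, regnum, ret, ret);

	return ret;
}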
888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 | // SPDX-License-Identifier: GPL-2.0-only /* SocketCAN driver for Microchip CAN BUS Analyzer Tool * * Copyright (C) 2017 Mobica Limited * * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c */ #include <linux/unaligned.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/usb.h> /* vendor and product id */ #define MCBA_MODULE_NAME "mcba_usb" #define MCBA_VENDOR_ID 0x04d8 #define MCBA_PRODUCT_ID 0x0a30 /* driver constants */ #define MCBA_MAX_RX_URBS 20 #define MCBA_MAX_TX_URBS 20 #define MCBA_CTX_FREE MCBA_MAX_TX_URBS /* RX buffer must be bigger than msg size since at the * beginning USB messages are stacked. */ #define MCBA_USB_RX_BUFF_SIZE 64 #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) /* Microchip command id */ #define MBCA_CMD_RECEIVE_MESSAGE 0xE3 #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 #define MBCA_CMD_I_AM_ALIVE_FROM_USB 0xF7 #define MBCA_CMD_CHANGE_BIT_RATE 0xA1 #define MBCA_CMD_TRANSMIT_MESSAGE_EV 0xA3 #define MBCA_CMD_SETUP_TERMINATION_RESISTANCE 0xA8 #define MBCA_CMD_READ_FW_VERSION 0xA9 #define MBCA_CMD_NOTHING_TO_SEND 0xFF #define MBCA_CMD_TRANSMIT_MESSAGE_RSP 0xE2 #define MCBA_VER_REQ_USB 1 #define MCBA_VER_REQ_CAN 2 /* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */ #define MCBA_VER_TERMINATION_ON 0 #define MCBA_VER_TERMINATION_OFF 1 #define MCBA_SIDL_EXID_MASK 0x8 #define MCBA_DLC_MASK 0xf #define MCBA_DLC_RTR_MASK 0x40 #define MCBA_CAN_STATE_WRN_TH 95 #define MCBA_CAN_STATE_ERR_PSV_TH 127 #define MCBA_TERMINATION_DISABLED CAN_TERMINATION_DISABLED #define MCBA_TERMINATION_ENABLED 120 struct mcba_usb_ctx { struct mcba_priv *priv; u32 ndx; bool can; }; /* Structure to hold all of our device specific stuff */ struct mcba_priv { struct can_priv can; /* must be the first member */ struct sk_buff *echo_skb[MCBA_MAX_TX_URBS]; struct mcba_usb_ctx tx_context[MCBA_MAX_TX_URBS]; struct usb_device *udev; struct net_device *netdev; struct usb_anchor tx_submitted; struct usb_anchor rx_submitted; struct can_berr_counter bec; bool usb_ka_first_pass; bool can_ka_first_pass; bool can_speed_check; atomic_t free_ctx_cnt; void *rxbuf[MCBA_MAX_RX_URBS]; dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS]; int rx_pipe; int tx_pipe; }; /* CAN frame */ struct __packed mcba_usb_msg_can { u8 cmd_id; __be16 eid; __be16 sid; u8 dlc; u8 data[8]; u8 timestamp[4]; u8 checksum; }; /* command frame */ struct __packed mcba_usb_msg { u8 cmd_id; u8 unused[18]; }; struct __packed mcba_usb_msg_ka_usb { u8 cmd_id; u8 termination_state; u8 soft_ver_major; u8 soft_ver_minor; u8 unused[15]; }; struct __packed mcba_usb_msg_ka_can { u8 cmd_id; u8 tx_err_cnt; u8 rx_err_cnt; u8 rx_buff_ovfl; u8 tx_bus_off; __be16 can_bitrate; __le16 rx_lost; u8 can_stat; u8 soft_ver_major; u8 soft_ver_minor; u8 debug_mode; u8 test_complete; u8 test_result; u8 unused[4]; }; struct __packed mcba_usb_msg_change_bitrate { u8 cmd_id; __be16 bitrate; u8 unused[16]; }; struct __packed mcba_usb_msg_termination { u8 cmd_id; u8 termination; u8 unused[17]; }; struct __packed mcba_usb_msg_fw_ver { u8 cmd_id; u8 pic; u8 unused[17]; }; static const struct usb_device_id mcba_usb_table[] = { { USB_DEVICE(MCBA_VENDOR_ID, MCBA_PRODUCT_ID) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, mcba_usb_table); static const u16 
mcba_termination[] = { MCBA_TERMINATION_DISABLED, MCBA_TERMINATION_ENABLED }; static const u32 mcba_bitrate[] = { 20000, 33333, 50000, 80000, 83333, 100000, 125000, 150000, 175000, 200000, 225000, 250000, 275000, 300000, 500000, 625000, 800000, 1000000 }; static inline void mcba_init_ctx(struct mcba_priv *priv) { int i = 0; for (i = 0; i < MCBA_MAX_TX_URBS; i++) { priv->tx_context[i].ndx = MCBA_CTX_FREE; priv->tx_context[i].priv = priv; } atomic_set(&priv->free_ctx_cnt, ARRAY_SIZE(priv->tx_context)); } static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv, struct can_frame *cf) { int i = 0; struct mcba_usb_ctx *ctx = NULL; for (i = 0; i < MCBA_MAX_TX_URBS; i++) { if (priv->tx_context[i].ndx == MCBA_CTX_FREE) { ctx = &priv->tx_context[i]; ctx->ndx = i; if (cf) ctx->can = true; else ctx->can = false; atomic_dec(&priv->free_ctx_cnt); break; } } if (!atomic_read(&priv->free_ctx_cnt)) /* That was the last free ctx. Slow down tx path */ netif_stop_queue(priv->netdev); return ctx; } /* mcba_usb_free_ctx and mcba_usb_get_free_ctx are executed by different * threads. The order of execution in below function is important. */ static inline void mcba_usb_free_ctx(struct mcba_usb_ctx *ctx) { /* Increase number of free ctxs before freeing ctx */ atomic_inc(&ctx->priv->free_ctx_cnt); ctx->ndx = MCBA_CTX_FREE; /* Wake up the queue once ctx is marked free */ netif_wake_queue(ctx->priv->netdev); } static void mcba_usb_write_bulk_callback(struct urb *urb) { struct mcba_usb_ctx *ctx = urb->context; struct net_device *netdev; WARN_ON(!ctx); netdev = ctx->priv->netdev; /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); if (ctx->can) { if (!netif_device_present(netdev)) return; netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx, NULL); } if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); /* Release the context */ mcba_usb_free_ctx(ctx); } /* Send data to device */ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, struct mcba_usb_msg *usb_msg, struct mcba_usb_ctx *ctx) { struct urb *urb; u8 *buf; int err; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; buf = usb_alloc_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { err = -ENOMEM; goto nomembuf; } memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, ctx); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) goto failed; /* Release our reference to this URB, the USB core will eventually free * it entirely. 
*/ usb_free_urb(urb); return 0; failed: usb_unanchor_urb(urb); usb_free_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, buf, urb->transfer_dma); if (err == -ENODEV) netif_device_detach(priv->netdev); else netdev_warn(priv->netdev, "failed tx_urb %d\n", err); nomembuf: usb_free_urb(urb); return err; } /* Send data to device */ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct mcba_priv *priv = netdev_priv(netdev); struct can_frame *cf = (struct can_frame *)skb->data; struct mcba_usb_ctx *ctx = NULL; struct net_device_stats *stats = &priv->netdev->stats; u16 sid; int err; struct mcba_usb_msg_can usb_msg = { .cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV }; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; ctx = mcba_usb_get_free_ctx(priv, cf); if (!ctx) return NETDEV_TX_BUSY; if (cf->can_id & CAN_EFF_FLAG) { /* SIDH | SIDL | EIDH | EIDL * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 */ sid = MCBA_SIDL_EXID_MASK; /* store 28-18 bits */ sid |= (cf->can_id & 0x1ffc0000) >> 13; /* store 17-16 bits */ sid |= (cf->can_id & 0x30000) >> 16; put_unaligned_be16(sid, &usb_msg.sid); /* store 15-0 bits */ put_unaligned_be16(cf->can_id & 0xffff, &usb_msg.eid); } else { /* SIDH | SIDL * 10 - 3 | 2 1 0 x x x x x */ put_unaligned_be16((cf->can_id & CAN_SFF_MASK) << 5, &usb_msg.sid); usb_msg.eid = 0; } usb_msg.dlc = cf->len; memcpy(usb_msg.data, cf->data, usb_msg.dlc); if (cf->can_id & CAN_RTR_FLAG) usb_msg.dlc |= MCBA_DLC_RTR_MASK; can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0); err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx); if (err) goto xmit_failed; return NETDEV_TX_OK; xmit_failed: can_free_echo_skb(priv->netdev, ctx->ndx, NULL); mcba_usb_free_ctx(ctx); stats->tx_dropped++; return NETDEV_TX_OK; } /* Send cmd to device */ static void mcba_usb_xmit_cmd(struct mcba_priv *priv, struct mcba_usb_msg *usb_msg) { struct mcba_usb_ctx *ctx = NULL; int err; ctx = mcba_usb_get_free_ctx(priv, NULL); if (!ctx) { netdev_err(priv->netdev, "Lack of free ctx. 
Sending (%d) cmd aborted", usb_msg->cmd_id); return; } err = mcba_usb_xmit(priv, usb_msg, ctx); if (err) netdev_err(priv->netdev, "Failed to send cmd (%d)", usb_msg->cmd_id); } static void mcba_usb_xmit_change_bitrate(struct mcba_priv *priv, u16 bitrate) { struct mcba_usb_msg_change_bitrate usb_msg = { .cmd_id = MBCA_CMD_CHANGE_BIT_RATE }; put_unaligned_be16(bitrate, &usb_msg.bitrate); mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); } static void mcba_usb_xmit_read_fw_ver(struct mcba_priv *priv, u8 pic) { struct mcba_usb_msg_fw_ver usb_msg = { .cmd_id = MBCA_CMD_READ_FW_VERSION, .pic = pic }; mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); } static void mcba_usb_process_can(struct mcba_priv *priv, struct mcba_usb_msg_can *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &priv->netdev->stats; u16 sid; skb = alloc_can_skb(priv->netdev, &cf); if (!skb) return; sid = get_unaligned_be16(&msg->sid); if (sid & MCBA_SIDL_EXID_MASK) { /* SIDH | SIDL | EIDH | EIDL * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 */ cf->can_id = CAN_EFF_FLAG; /* store 28-18 bits */ cf->can_id |= (sid & 0xffe0) << 13; /* store 17-16 bits */ cf->can_id |= (sid & 3) << 16; /* store 15-0 bits */ cf->can_id |= get_unaligned_be16(&msg->eid); } else { /* SIDH | SIDL * 10 - 3 | 2 1 0 x x x x x */ cf->can_id = (sid & 0xffe0) >> 5; } cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK); if (msg->dlc & MCBA_DLC_RTR_MASK) { cf->can_id |= CAN_RTR_FLAG; } else { memcpy(cf->data, msg->data, cf->len); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } static void mcba_usb_process_ka_usb(struct mcba_priv *priv, struct mcba_usb_msg_ka_usb *msg) { if (unlikely(priv->usb_ka_first_pass)) { netdev_info(priv->netdev, "PIC USB version %u.%u\n", msg->soft_ver_major, msg->soft_ver_minor); priv->usb_ka_first_pass = false; } if (msg->termination_state == MCBA_VER_TERMINATION_ON) priv->can.termination = MCBA_TERMINATION_ENABLED; else priv->can.termination = MCBA_TERMINATION_DISABLED; } static u32 convert_can2host_bitrate(struct mcba_usb_msg_ka_can *msg) { const u32 bitrate = get_unaligned_be16(&msg->can_bitrate); if ((bitrate == 33) || (bitrate == 83)) return bitrate * 1000 + 333; else return bitrate * 1000; } static void mcba_usb_process_ka_can(struct mcba_priv *priv, struct mcba_usb_msg_ka_can *msg) { if (unlikely(priv->can_ka_first_pass)) { netdev_info(priv->netdev, "PIC CAN version %u.%u\n", msg->soft_ver_major, msg->soft_ver_minor); priv->can_ka_first_pass = false; } if (unlikely(priv->can_speed_check)) { const u32 bitrate = convert_can2host_bitrate(msg); priv->can_speed_check = false; if (bitrate != priv->can.bittiming.bitrate) netdev_err( priv->netdev, "Wrong bitrate reported by the device (%u). 
Expected %u", bitrate, priv->can.bittiming.bitrate); } priv->bec.txerr = msg->tx_err_cnt; priv->bec.rxerr = msg->rx_err_cnt; if (msg->tx_bus_off) priv->can.state = CAN_STATE_BUS_OFF; else if ((priv->bec.txerr > MCBA_CAN_STATE_ERR_PSV_TH) || (priv->bec.rxerr > MCBA_CAN_STATE_ERR_PSV_TH)) priv->can.state = CAN_STATE_ERROR_PASSIVE; else if ((priv->bec.txerr > MCBA_CAN_STATE_WRN_TH) || (priv->bec.rxerr > MCBA_CAN_STATE_WRN_TH)) priv->can.state = CAN_STATE_ERROR_WARNING; } static void mcba_usb_process_rx(struct mcba_priv *priv, struct mcba_usb_msg *msg) { switch (msg->cmd_id) { case MBCA_CMD_I_AM_ALIVE_FROM_CAN: mcba_usb_process_ka_can(priv, (struct mcba_usb_msg_ka_can *)msg); break; case MBCA_CMD_I_AM_ALIVE_FROM_USB: mcba_usb_process_ka_usb(priv, (struct mcba_usb_msg_ka_usb *)msg); break; case MBCA_CMD_RECEIVE_MESSAGE: mcba_usb_process_can(priv, (struct mcba_usb_msg_can *)msg); break; case MBCA_CMD_NOTHING_TO_SEND: /* Side effect of communication between PIC_USB and PIC_CAN. * PIC_CAN is telling us that it has nothing to send */ break; case MBCA_CMD_TRANSMIT_MESSAGE_RSP: /* Transmission response from the device containing timestamp */ break; default: netdev_warn(priv->netdev, "Unsupported msg (0x%X)", msg->cmd_id); break; } } /* Callback for reading data from device * * Check urb status, call read function and resubmit urb read operation. */ static void mcba_usb_read_bulk_callback(struct urb *urb) { struct mcba_priv *priv = urb->context; struct net_device *netdev; int retval; int pos = 0; netdev = priv->netdev; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } while (pos < urb->actual_length) { struct mcba_usb_msg *msg; if (pos + sizeof(struct mcba_usb_msg) > urb->actual_length) { netdev_err(priv->netdev, "format error\n"); break; } msg = (struct mcba_usb_msg *)(urb->transfer_buffer + pos); mcba_usb_process_rx(priv, msg); pos += sizeof(struct mcba_usb_msg); } resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, priv->rx_pipe, urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval == -ENODEV) netif_device_detach(netdev); else if (retval) netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", retval); } /* Start USB device */ static int mcba_usb_start(struct mcba_priv *priv) { struct net_device *netdev = priv->netdev; int err, i; mcba_init_ctx(priv); for (i = 0; i < MCBA_MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, priv->udev, priv->rx_pipe, buf, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, buf, buf_dma); usb_free_urb(urb); break; } priv->rxbuf[i] = buf; priv->rxbuf_dma[i] = buf_dma; /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* Did we submit any URBs */ if (i == 0) { 
netdev_warn(netdev, "couldn't setup read URBs\n"); return err; } /* Warn if we've couldn't transmit all the URBs */ if (i < MCBA_MAX_RX_URBS) netdev_warn(netdev, "rx performance may be slow\n"); mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_USB); mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_CAN); return err; } /* Open USB device */ static int mcba_usb_open(struct net_device *netdev) { struct mcba_priv *priv = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; priv->can_speed_check = true; priv->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(netdev); return 0; } static void mcba_urb_unlink(struct mcba_priv *priv) { int i; usb_kill_anchored_urbs(&priv->rx_submitted); for (i = 0; i < MCBA_MAX_RX_URBS; ++i) usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, priv->rxbuf[i], priv->rxbuf_dma[i]); usb_kill_anchored_urbs(&priv->tx_submitted); } /* Close USB device */ static int mcba_usb_close(struct net_device *netdev) { struct mcba_priv *priv = netdev_priv(netdev); priv->can.state = CAN_STATE_STOPPED; netif_stop_queue(netdev); /* Stop polling */ mcba_urb_unlink(priv); close_candev(netdev); return 0; } /* Set network device mode * * Maybe we should leave this function empty, because the device * set mode variable with open command. */ static int mcba_net_set_mode(struct net_device *netdev, enum can_mode mode) { return 0; } static int mcba_net_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct mcba_priv *priv = netdev_priv(netdev); bec->txerr = priv->bec.txerr; bec->rxerr = priv->bec.rxerr; return 0; } static const struct net_device_ops mcba_netdev_ops = { .ndo_open = mcba_usb_open, .ndo_stop = mcba_usb_close, .ndo_start_xmit = mcba_usb_start_xmit, }; static const struct ethtool_ops mcba_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; /* Microchip CANBUS has hardcoded bittiming values by default. 
* This function sends request via USB to change the speed and align bittiming * values for presentation purposes only */ static int mcba_net_set_bittiming(struct net_device *netdev) { struct mcba_priv *priv = netdev_priv(netdev); const u16 bitrate_kbps = priv->can.bittiming.bitrate / 1000; mcba_usb_xmit_change_bitrate(priv, bitrate_kbps); return 0; } static int mcba_set_termination(struct net_device *netdev, u16 term) { struct mcba_priv *priv = netdev_priv(netdev); struct mcba_usb_msg_termination usb_msg = { .cmd_id = MBCA_CMD_SETUP_TERMINATION_RESISTANCE }; if (term == MCBA_TERMINATION_ENABLED) usb_msg.termination = MCBA_VER_TERMINATION_ON; else usb_msg.termination = MCBA_VER_TERMINATION_OFF; mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); return 0; } static int mcba_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct net_device *netdev; struct mcba_priv *priv; int err; struct usb_device *usbdev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *in, *out; err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); if (err) { dev_err(&intf->dev, "Can't find endpoints\n"); return err; } netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Couldn't alloc candev\n"); return -ENOMEM; } priv = netdev_priv(netdev); priv->udev = usbdev; priv->netdev = netdev; priv->usb_ka_first_pass = true; priv->can_ka_first_pass = true; priv->can_speed_check = false; init_usb_anchor(&priv->rx_submitted); init_usb_anchor(&priv->tx_submitted); usb_set_intfdata(intf, priv); /* Init CAN device */ priv->can.state = CAN_STATE_STOPPED; priv->can.termination_const = mcba_termination; priv->can.termination_const_cnt = ARRAY_SIZE(mcba_termination); priv->can.bitrate_const = mcba_bitrate; priv->can.bitrate_const_cnt = ARRAY_SIZE(mcba_bitrate); priv->can.do_set_termination = mcba_set_termination; priv->can.do_set_mode = mcba_net_set_mode; priv->can.do_get_berr_counter = mcba_net_get_berr_counter; priv->can.do_set_bittiming = mcba_net_set_bittiming; netdev->netdev_ops = &mcba_netdev_ops; netdev->ethtool_ops = &mcba_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ SET_NETDEV_DEV(netdev, &intf->dev); err = register_candev(netdev); if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); goto cleanup_free_candev; } priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress); priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress); /* Start USB dev only if we have successfully registered CAN device */ err = mcba_usb_start(priv); if (err) { if (err == -ENODEV) netif_device_detach(priv->netdev); netdev_warn(netdev, "couldn't start device: %d\n", err); goto cleanup_unregister_candev; } dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n"); return 0; cleanup_unregister_candev: unregister_candev(priv->netdev); cleanup_free_candev: free_candev(netdev); return err; } /* Called by the usb core when driver is unloaded or device is removed */ static void mcba_usb_disconnect(struct usb_interface *intf) { struct mcba_priv *priv = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); netdev_info(priv->netdev, "device disconnected\n"); unregister_candev(priv->netdev); mcba_urb_unlink(priv); free_candev(priv->netdev); } static struct usb_driver mcba_usb_driver = { .name = MCBA_MODULE_NAME, .probe = mcba_usb_probe, .disconnect = mcba_usb_disconnect, .id_table = mcba_usb_table, }; module_usb_driver(mcba_usb_driver); MODULE_AUTHOR("Remigiusz Kołłątaj 
<remigiusz.kollataj@mobica.com>"); MODULE_DESCRIPTION("SocketCAN driver for Microchip CAN BUS Analyzer Tool"); MODULE_LICENSE("GPL v2"); |
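/*
 * Worked example (illustrative, not part of the driver): the SID/EID packing
 * performed by mcba_usb_start_xmit() for extended frames, mirrored here as a
 * stand-alone helper so the "SIDH | SIDL" bit layout in the comments above is
 * easier to follow. The CAN ID passed in is arbitrary.
 */
static u16 example_pack_extended_sid(u32 can_id)
{
	u16 sid = MCBA_SIDL_EXID_MASK;		/* mark the frame as extended */

	sid |= (can_id & 0x1ffc0000) >> 13;	/* ID bits 28-18 -> SIDH, SIDL[7:5] */
	sid |= (can_id & 0x30000) >> 16;	/* ID bits 17-16 -> SIDL[1:0] */

	return sid;
}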
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sony NFC Port-100 Series driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * Partly based/Inspired by Stephen Tiedemann's nfcpy
 */

#include <linux/module.h>
#include <linux/usb.h>
#include <net/nfc/digital.h>

#define VERSION "0.1"

#define SONY_VENDOR_ID		0x054c
#define RCS380S_PRODUCT_ID	0x06c1
#define RCS380P_PRODUCT_ID	0x06c3

#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK    | \
			   NFC_PROTO_MIFARE_MASK   | \
			   NFC_PROTO_FELICA_MASK   | \
			   NFC_PROTO_NFC_DEP_MASK  | \
			   NFC_PROTO_ISO14443_MASK | \
			   NFC_PROTO_ISO14443_B_MASK)

#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
			      NFC_DIGITAL_DRV_CAPS_TG_CRC)

/* Standard port100 frame definitions */
#define PORT100_FRAME_HEADER_LEN (sizeof(struct port100_frame) \
				  + 2) /* data[0] CC, data[1] SCC */
#define PORT100_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble */

#define PORT100_COMM_RF_HEAD_MAX_LEN (sizeof(struct port100_tg_comm_rf_cmd))

/*
 * Max extended frame payload len, excluding CC and SCC
 * which are already in PORT100_FRAME_HEADER_LEN.
 */
#define PORT100_FRAME_MAX_PAYLOAD_LEN 1001

#define PORT100_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
				    Postamble (1) */
static u8 ack_frame[PORT100_FRAME_ACK_SIZE] = {
	0x00, 0x00, 0xff, 0x00, 0xff, 0x00
};

#define PORT100_FRAME_CHECKSUM(f) (f->data[le16_to_cpu(f->datalen)])
#define PORT100_FRAME_POSTAMBLE(f) (f->data[le16_to_cpu(f->datalen) + 1])

/* start of frame */
#define PORT100_FRAME_SOF	0x00FF
#define PORT100_FRAME_EXT	0xFFFF
#define PORT100_FRAME_ACK	0x00FF

/* Port-100 command: in or out */
#define PORT100_FRAME_DIRECTION(f) (f->data[0]) /* CC */
#define PORT100_FRAME_DIR_OUT	0xD6
#define PORT100_FRAME_DIR_IN	0xD7

/* Port-100 sub-command */
#define PORT100_FRAME_CMD(f) (f->data[1]) /* SCC */
#define PORT100_CMD_GET_FIRMWARE_VERSION	0x20
#define PORT100_CMD_GET_COMMAND_TYPE		0x28
#define PORT100_CMD_SET_COMMAND_TYPE		0x2A
#define PORT100_CMD_IN_SET_RF			0x00
#define PORT100_CMD_IN_SET_PROTOCOL		0x02
#define PORT100_CMD_IN_COMM_RF			0x04
#define PORT100_CMD_TG_SET_RF			0x40
#define PORT100_CMD_TG_SET_PROTOCOL		0x42
#define PORT100_CMD_TG_SET_RF_OFF		0x46
#define PORT100_CMD_TG_COMM_RF			0x48
#define PORT100_CMD_SWITCH_RF			0x06

#define PORT100_CMD_RESPONSE(cmd) (cmd + 1)

#define PORT100_CMD_TYPE_IS_SUPPORTED(mask, cmd_type) \
	((mask) & (0x01 << (cmd_type)))
#define PORT100_CMD_TYPE_0	0
#define PORT100_CMD_TYPE_1	1

#define PORT100_CMD_STATUS_OK		0x00
#define PORT100_CMD_STATUS_TIMEOUT	0x80

#define PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK	0x01
#define PORT100_MDAA_TGT_WAS_ACTIVATED_MASK		0x02

struct port100;

typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
					      struct sk_buff *resp);

/*
 * Setting sets structure for in_set_rf command
 *
 * @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
 *		     This table contains multiple RF setting sets required for
 *		     RF communication.
 *
 * @in_*_comm_type: These fields set the communication type to be used.
*/ struct port100_in_rf_setting { u8 in_send_set_number; u8 in_send_comm_type; u8 in_recv_set_number; u8 in_recv_comm_type; } __packed; #define PORT100_COMM_TYPE_IN_212F 0x01 #define PORT100_COMM_TYPE_IN_424F 0x02 #define PORT100_COMM_TYPE_IN_106A 0x03 #define PORT100_COMM_TYPE_IN_106B 0x07 static const struct port100_in_rf_setting in_rf_settings[] = { [NFC_DIGITAL_RF_TECH_212F] = { .in_send_set_number = 1, .in_send_comm_type = PORT100_COMM_TYPE_IN_212F, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_212F, }, [NFC_DIGITAL_RF_TECH_424F] = { .in_send_set_number = 1, .in_send_comm_type = PORT100_COMM_TYPE_IN_424F, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_424F, }, [NFC_DIGITAL_RF_TECH_106A] = { .in_send_set_number = 2, .in_send_comm_type = PORT100_COMM_TYPE_IN_106A, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_106A, }, [NFC_DIGITAL_RF_TECH_106B] = { .in_send_set_number = 3, .in_send_comm_type = PORT100_COMM_TYPE_IN_106B, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_106B, }, /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */ [NFC_DIGITAL_RF_TECH_LAST] = { 0 }, }; /** * struct port100_tg_rf_setting - Setting sets structure for tg_set_rf command * * @tg_set_number: Represents the entry index in the port-100 RF Base Table. * This table contains multiple RF setting sets required for RF * communication. this field is used for both send and receive * settings. * * @tg_comm_type: Sets the communication type to be used to send and receive * data. */ struct port100_tg_rf_setting { u8 tg_set_number; u8 tg_comm_type; } __packed; #define PORT100_COMM_TYPE_TG_106A 0x0B #define PORT100_COMM_TYPE_TG_212F 0x0C #define PORT100_COMM_TYPE_TG_424F 0x0D static const struct port100_tg_rf_setting tg_rf_settings[] = { [NFC_DIGITAL_RF_TECH_106A] = { .tg_set_number = 8, .tg_comm_type = PORT100_COMM_TYPE_TG_106A, }, [NFC_DIGITAL_RF_TECH_212F] = { .tg_set_number = 8, .tg_comm_type = PORT100_COMM_TYPE_TG_212F, }, [NFC_DIGITAL_RF_TECH_424F] = { .tg_set_number = 8, .tg_comm_type = PORT100_COMM_TYPE_TG_424F, }, /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */ [NFC_DIGITAL_RF_TECH_LAST] = { 0 }, }; #define PORT100_IN_PROT_INITIAL_GUARD_TIME 0x00 #define PORT100_IN_PROT_ADD_CRC 0x01 #define PORT100_IN_PROT_CHECK_CRC 0x02 #define PORT100_IN_PROT_MULTI_CARD 0x03 #define PORT100_IN_PROT_ADD_PARITY 0x04 #define PORT100_IN_PROT_CHECK_PARITY 0x05 #define PORT100_IN_PROT_BITWISE_AC_RECV_MODE 0x06 #define PORT100_IN_PROT_VALID_BIT_NUMBER 0x07 #define PORT100_IN_PROT_CRYPTO1 0x08 #define PORT100_IN_PROT_ADD_SOF 0x09 #define PORT100_IN_PROT_CHECK_SOF 0x0A #define PORT100_IN_PROT_ADD_EOF 0x0B #define PORT100_IN_PROT_CHECK_EOF 0x0C #define PORT100_IN_PROT_DEAF_TIME 0x0E #define PORT100_IN_PROT_CRM 0x0F #define PORT100_IN_PROT_CRM_MIN_LEN 0x10 #define PORT100_IN_PROT_T1_TAG_FRAME 0x11 #define PORT100_IN_PROT_RFCA 0x12 #define PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR 0x13 #define PORT100_IN_PROT_END 0x14 #define PORT100_IN_MAX_NUM_PROTOCOLS 19 #define PORT100_TG_PROT_TU 0x00 #define PORT100_TG_PROT_RF_OFF 0x01 #define PORT100_TG_PROT_CRM 0x02 #define PORT100_TG_PROT_END 0x03 #define PORT100_TG_MAX_NUM_PROTOCOLS 3 struct port100_protocol { u8 number; u8 value; } __packed; static const struct port100_protocol in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = { [NFC_DIGITAL_FRAMING_NFCA_SHORT] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 }, { PORT100_IN_PROT_ADD_CRC, 0 }, { PORT100_IN_PROT_CHECK_CRC, 0 }, { 
PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 1 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 7 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 }, { PORT100_IN_PROT_ADD_CRC, 0 }, { PORT100_IN_PROT_CHECK_CRC, 0 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 1 }, { PORT100_IN_PROT_CHECK_PARITY, 1 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 1 }, { PORT100_IN_PROT_CHECK_PARITY, 1 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T1T] = { /* nfc_digital_framing_nfca_short */ { PORT100_IN_PROT_ADD_CRC, 2 }, { PORT100_IN_PROT_CHECK_CRC, 2 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_T1_TAG_FRAME, 2 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T2T] = { /* nfc_digital_framing_nfca_standard */ { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 0 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T4T] = { /* nfc_digital_framing_nfca_standard_with_crc_a */ { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = { /* nfc_digital_framing_nfca_standard */ { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 0 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_T3T] = { /* 
nfc_digital_framing_nfcf */ { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = { /* nfc_digital_framing_nfcf */ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 0 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = { { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCB] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 20 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 0 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 1 }, { PORT100_IN_PROT_CHECK_SOF, 1 }, { PORT100_IN_PROT_ADD_EOF, 1 }, { PORT100_IN_PROT_CHECK_EOF, 1 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCB_T4T] = { /* nfc_digital_framing_nfcb */ { PORT100_IN_PROT_END, 0 }, }, /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */ [NFC_DIGITAL_FRAMING_LAST] = { { PORT100_IN_PROT_END, 0 }, }, }; static const struct port100_protocol tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = { [NFC_DIGITAL_FRAMING_NFCA_SHORT] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T1T] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T2T] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = { { PORT100_TG_PROT_TU, 1 }, { PORT100_TG_PROT_RF_OFF, 0 }, { PORT100_TG_PROT_CRM, 7 }, { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_T3T] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = { { PORT100_TG_PROT_TU, 1 }, { PORT100_TG_PROT_RF_OFF, 0 }, { PORT100_TG_PROT_CRM, 7 }, { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = { { PORT100_TG_PROT_RF_OFF, 1 }, { PORT100_TG_PROT_END, 0 }, }, /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */ [NFC_DIGITAL_FRAMING_LAST] = { { PORT100_TG_PROT_END, 0 }, }, }; struct port100 { struct nfc_digital_dev *nfc_digital_dev; int skb_headroom; int skb_tailroom; struct usb_device *udev; struct usb_interface *interface; struct urb *out_urb; struct urb *in_urb; /* This mutex protects the out_urb and avoids to submit a new command * through port100_send_frame_async() while the previous one is being * canceled through port100_abort_cmd(). */ struct mutex out_urb_lock; struct work_struct cmd_complete_work; u8 cmd_type; /* The digital stack serializes commands to be sent. 
There is no need * for any queuing/locking mechanism at driver level. */ struct port100_cmd *cmd; bool cmd_cancel; struct completion cmd_cancel_done; }; struct port100_cmd { u8 code; int status; struct sk_buff *req; struct sk_buff *resp; int resp_len; port100_send_async_complete_t complete_cb; void *complete_cb_context; }; struct port100_frame { u8 preamble; __be16 start_frame; __be16 extended_frame; __le16 datalen; u8 datalen_checksum; u8 data[]; } __packed; struct port100_ack_frame { u8 preamble; __be16 start_frame; __be16 ack_frame; u8 postambule; } __packed; struct port100_cb_arg { nfc_digital_cmd_complete_t complete_cb; void *complete_arg; u8 mdaa; }; struct port100_tg_comm_rf_cmd { __le16 guard_time; __le16 send_timeout; u8 mdaa; u8 nfca_param[6]; u8 nfcf_param[18]; u8 mf_halted; u8 arae_flag; __le16 recv_timeout; u8 data[]; } __packed; struct port100_tg_comm_rf_res { u8 comm_type; u8 ar_status; u8 target_activated; __le32 status; u8 data[]; } __packed; /* The rule: value + checksum = 0 */ static inline u8 port100_checksum(u16 value) { return ~(((u8 *)&value)[0] + ((u8 *)&value)[1]) + 1; } /* The rule: sum(data elements) + checksum = 0 */ static u8 port100_data_checksum(const u8 *data, int datalen) { u8 sum = 0; int i; for (i = 0; i < datalen; i++) sum += data[i]; return port100_checksum(sum); } static void port100_tx_frame_init(void *_frame, u8 cmd_code) { struct port100_frame *frame = _frame; frame->preamble = 0; frame->start_frame = cpu_to_be16(PORT100_FRAME_SOF); frame->extended_frame = cpu_to_be16(PORT100_FRAME_EXT); PORT100_FRAME_DIRECTION(frame) = PORT100_FRAME_DIR_OUT; PORT100_FRAME_CMD(frame) = cmd_code; frame->datalen = cpu_to_le16(2); } static void port100_tx_frame_finish(void *_frame) { struct port100_frame *frame = _frame; frame->datalen_checksum = port100_checksum(le16_to_cpu(frame->datalen)); PORT100_FRAME_CHECKSUM(frame) = port100_data_checksum(frame->data, le16_to_cpu(frame->datalen)); PORT100_FRAME_POSTAMBLE(frame) = 0; } static void port100_tx_update_payload_len(void *_frame, int len) { struct port100_frame *frame = _frame; le16_add_cpu(&frame->datalen, len); } static bool port100_rx_frame_is_valid(const void *_frame) { u8 checksum; const struct port100_frame *frame = _frame; if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) || frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT)) return false; checksum = port100_checksum(le16_to_cpu(frame->datalen)); if (checksum != frame->datalen_checksum) return false; checksum = port100_data_checksum(frame->data, le16_to_cpu(frame->datalen)); if (checksum != PORT100_FRAME_CHECKSUM(frame)) return false; return true; } static bool port100_rx_frame_is_ack(const struct port100_ack_frame *frame) { return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) && frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK)); } static inline int port100_rx_frame_size(const void *frame) { const struct port100_frame *f = frame; return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) + PORT100_FRAME_TAIL_LEN; } static bool port100_rx_frame_is_cmd_response(const struct port100 *dev, const void *frame) { const struct port100_frame *f = frame; return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code)); } static void port100_recv_response(struct urb *urb) { struct port100 *dev = urb->context; struct port100_cmd *cmd = dev->cmd; u8 *in_frame; cmd->status = urb->status; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: nfc_dbg(&dev->interface->dev, "The urb has been canceled (status %d)\n", 
urb->status); goto sched_wq; case -ESHUTDOWN: default: nfc_err(&dev->interface->dev, "Urb failure (status %d)\n", urb->status); goto sched_wq; } in_frame = dev->in_urb->transfer_buffer; if (!port100_rx_frame_is_valid(in_frame)) { nfc_err(&dev->interface->dev, "Received an invalid frame\n"); cmd->status = -EIO; goto sched_wq; } print_hex_dump_debug("PORT100 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame, port100_rx_frame_size(in_frame), false); if (!port100_rx_frame_is_cmd_response(dev, in_frame)) { nfc_err(&dev->interface->dev, "It's not the response to the last command\n"); cmd->status = -EIO; goto sched_wq; } sched_wq: schedule_work(&dev->cmd_complete_work); } static int port100_submit_urb_for_response(const struct port100 *dev, gfp_t flags) { dev->in_urb->complete = port100_recv_response; return usb_submit_urb(dev->in_urb, flags); } static void port100_recv_ack(struct urb *urb) { struct port100 *dev = urb->context; struct port100_cmd *cmd = dev->cmd; const struct port100_ack_frame *in_frame; int rc; cmd->status = urb->status; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: nfc_dbg(&dev->interface->dev, "The urb has been stopped (status %d)\n", urb->status); goto sched_wq; case -ESHUTDOWN: default: nfc_err(&dev->interface->dev, "Urb failure (status %d)\n", urb->status); goto sched_wq; } in_frame = dev->in_urb->transfer_buffer; if (!port100_rx_frame_is_ack(in_frame)) { nfc_err(&dev->interface->dev, "Received an invalid ack\n"); cmd->status = -EIO; goto sched_wq; } rc = port100_submit_urb_for_response(dev, GFP_ATOMIC); if (rc) { nfc_err(&dev->interface->dev, "usb_submit_urb failed with result %d\n", rc); cmd->status = rc; goto sched_wq; } return; sched_wq: schedule_work(&dev->cmd_complete_work); } static int port100_submit_urb_for_ack(const struct port100 *dev, gfp_t flags) { dev->in_urb->complete = port100_recv_ack; return usb_submit_urb(dev->in_urb, flags); } static int port100_send_ack(struct port100 *dev) { int rc = 0; mutex_lock(&dev->out_urb_lock); /* * If prior cancel is in-flight (dev->cmd_cancel == true), we * can skip to send cancel. Then this will wait the prior * cancel, or merged into the next cancel rarely if next * cancel was started before waiting done. In any case, this * will be waked up soon or later. */ if (!dev->cmd_cancel) { reinit_completion(&dev->cmd_cancel_done); usb_kill_urb(dev->out_urb); dev->out_urb->transfer_buffer = ack_frame; dev->out_urb->transfer_buffer_length = sizeof(ack_frame); rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); /* * Set the cmd_cancel flag only if the URB has been * successfully submitted. It will be reset by the out * URB completion callback port100_send_complete(). */ dev->cmd_cancel = !rc; } mutex_unlock(&dev->out_urb_lock); if (!rc) wait_for_completion(&dev->cmd_cancel_done); return rc; } static int port100_send_frame_async(struct port100 *dev, const struct sk_buff *out, const struct sk_buff *in, int in_len) { int rc; mutex_lock(&dev->out_urb_lock); /* A command cancel frame as been sent through dev->out_urb. Don't try * to submit a new one. 
*/ if (dev->cmd_cancel) { rc = -EAGAIN; goto exit; } dev->out_urb->transfer_buffer = out->data; dev->out_urb->transfer_buffer_length = out->len; dev->in_urb->transfer_buffer = in->data; dev->in_urb->transfer_buffer_length = in_len; print_hex_dump_debug("PORT100 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); if (rc) goto exit; rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); if (rc) usb_kill_urb(dev->out_urb); exit: mutex_unlock(&dev->out_urb_lock); return rc; } static void port100_build_cmd_frame(struct port100 *dev, u8 cmd_code, struct sk_buff *skb) { /* payload is already there, just update datalen */ int payload_len = skb->len; skb_push(skb, PORT100_FRAME_HEADER_LEN); skb_put(skb, PORT100_FRAME_TAIL_LEN); port100_tx_frame_init(skb->data, cmd_code); port100_tx_update_payload_len(skb->data, payload_len); port100_tx_frame_finish(skb->data); } static void port100_send_async_complete(struct port100 *dev) { struct port100_cmd *cmd = dev->cmd; int status = cmd->status; struct sk_buff *req = cmd->req; struct sk_buff *resp = cmd->resp; dev_kfree_skb(req); dev->cmd = NULL; if (status < 0) { cmd->complete_cb(dev, cmd->complete_cb_context, ERR_PTR(status)); dev_kfree_skb(resp); goto done; } skb_put(resp, port100_rx_frame_size(resp->data)); skb_pull(resp, PORT100_FRAME_HEADER_LEN); skb_trim(resp, resp->len - PORT100_FRAME_TAIL_LEN); cmd->complete_cb(dev, cmd->complete_cb_context, resp); done: kfree(cmd); } static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code, struct sk_buff *req, port100_send_async_complete_t complete_cb, void *complete_cb_context) { struct port100_cmd *cmd; struct sk_buff *resp; int rc; int resp_len = PORT100_FRAME_HEADER_LEN + PORT100_FRAME_MAX_PAYLOAD_LEN + PORT100_FRAME_TAIL_LEN; if (dev->cmd) { nfc_err(&dev->interface->dev, "A command is still in process\n"); return -EBUSY; } resp = alloc_skb(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { dev_kfree_skb(resp); return -ENOMEM; } cmd->code = cmd_code; cmd->req = req; cmd->resp = resp; cmd->resp_len = resp_len; cmd->complete_cb = complete_cb; cmd->complete_cb_context = complete_cb_context; port100_build_cmd_frame(dev, cmd_code, req); dev->cmd = cmd; rc = port100_send_frame_async(dev, req, resp, resp_len); if (rc) { kfree(cmd); dev_kfree_skb(resp); dev->cmd = NULL; } return rc; } struct port100_sync_cmd_response { struct sk_buff *resp; struct completion done; }; static void port100_wq_cmd_complete(struct work_struct *work) { struct port100 *dev = container_of(work, struct port100, cmd_complete_work); port100_send_async_complete(dev); } static void port100_send_sync_complete(struct port100 *dev, void *_arg, struct sk_buff *resp) { struct port100_sync_cmd_response *arg = _arg; arg->resp = resp; complete(&arg->done); } static struct sk_buff *port100_send_cmd_sync(struct port100 *dev, u8 cmd_code, struct sk_buff *req) { int rc; struct port100_sync_cmd_response arg; init_completion(&arg.done); rc = port100_send_cmd_async(dev, cmd_code, req, port100_send_sync_complete, &arg); if (rc) { dev_kfree_skb(req); return ERR_PTR(rc); } wait_for_completion(&arg.done); return arg.resp; } static void port100_send_complete(struct urb *urb) { struct port100 *dev = urb->context; if (dev->cmd_cancel) { complete_all(&dev->cmd_cancel_done); dev->cmd_cancel = false; } switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: nfc_dbg(&dev->interface->dev, "The urb has been stopped (status %d)\n", 
urb->status); break; case -ESHUTDOWN: default: nfc_err(&dev->interface->dev, "Urb failure (status %d)\n", urb->status); } } static void port100_abort_cmd(struct nfc_digital_dev *ddev) { struct port100 *dev = nfc_digital_get_drvdata(ddev); /* An ack will cancel the last issued command */ port100_send_ack(dev); /* cancel the urb request */ usb_kill_urb(dev->in_urb); } static struct sk_buff *port100_alloc_skb(const struct port100 *dev, unsigned int size) { struct sk_buff *skb; skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size, GFP_KERNEL); if (skb) skb_reserve(skb, dev->skb_headroom); return skb; } static int port100_set_command_type(struct port100 *dev, u8 command_type) { struct sk_buff *skb; struct sk_buff *resp; int rc; skb = port100_alloc_skb(dev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, command_type); resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static u64 port100_get_command_type_mask(struct port100 *dev) { struct sk_buff *skb; struct sk_buff *resp; u64 mask; skb = port100_alloc_skb(dev, 0); if (!skb) return 0; resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb); if (IS_ERR(resp)) return 0; if (resp->len < 8) mask = 0; else mask = be64_to_cpu(*(__be64 *)resp->data); dev_kfree_skb(resp); return mask; } static u16 port100_get_firmware_version(struct port100 *dev) { struct sk_buff *skb; struct sk_buff *resp; u16 fw_ver; skb = port100_alloc_skb(dev, 0); if (!skb) return 0; resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_FIRMWARE_VERSION, skb); if (IS_ERR(resp)) return 0; fw_ver = le16_to_cpu(*(__le16 *)resp->data); dev_kfree_skb(resp); return fw_ver; } static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb, *resp; skb = port100_alloc_skb(dev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, on ? 
1 : 0); /* Cancel the last command if the device is being switched off */ if (!on) port100_abort_cmd(ddev); resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb); if (IS_ERR(resp)) return PTR_ERR(resp); dev_kfree_skb(resp); return 0; } static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb; struct sk_buff *resp; int rc; if (rf >= NFC_DIGITAL_RF_TECH_LAST) return -EINVAL; skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting)); if (!skb) return -ENOMEM; skb_put_data(skb, &in_rf_settings[rf], sizeof(struct port100_in_rf_setting)); resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param) { struct port100 *dev = nfc_digital_get_drvdata(ddev); const struct port100_protocol *protocols; struct sk_buff *skb; struct sk_buff *resp; int num_protocols; size_t size; int rc; if (param >= NFC_DIGITAL_FRAMING_LAST) return -EINVAL; protocols = in_protocols[param]; num_protocols = 0; while (protocols[num_protocols].number != PORT100_IN_PROT_END) num_protocols++; if (!num_protocols) return 0; size = sizeof(struct port100_protocol) * num_protocols; skb = port100_alloc_skb(dev, size); if (!skb) return -ENOMEM; skb_put_data(skb, protocols, size); resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { if (type == NFC_DIGITAL_CONFIG_RF_TECH) return port100_in_set_rf(ddev, param); if (type == NFC_DIGITAL_CONFIG_FRAMING) return port100_in_set_framing(ddev, param); return -EINVAL; } static void port100_in_comm_rf_complete(struct port100 *dev, void *arg, struct sk_buff *resp) { const struct port100_cb_arg *cb_arg = arg; nfc_digital_cmd_complete_t cb = cb_arg->complete_cb; u32 status; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); goto exit; } if (resp->len < 4) { nfc_err(&dev->interface->dev, "Invalid packet length received\n"); rc = -EIO; goto error; } status = le32_to_cpu(*(__le32 *)resp->data); skb_pull(resp, sizeof(u32)); if (status == PORT100_CMD_STATUS_TIMEOUT) { rc = -ETIMEDOUT; goto error; } if (status != PORT100_CMD_STATUS_OK) { nfc_err(&dev->interface->dev, "in_comm_rf failed with status 0x%08x\n", status); rc = -EIO; goto error; } /* Remove collision bits byte */ skb_pull(resp, 1); goto exit; error: kfree_skb(resp); resp = ERR_PTR(rc); exit: cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp); kfree(cb_arg); } static int port100_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 _timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct port100_cb_arg *cb_arg; __le16 timeout; cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL); if (!cb_arg) return -ENOMEM; cb_arg->complete_cb = cb; cb_arg->complete_arg = arg; timeout = cpu_to_le16(_timeout * 10); memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16)); return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb, port100_in_comm_rf_complete, cb_arg); } static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb; struct sk_buff *resp; int rc; if (rf >= NFC_DIGITAL_RF_TECH_LAST) return -EINVAL; skb = port100_alloc_skb(dev, 
sizeof(struct port100_tg_rf_setting)); if (!skb) return -ENOMEM; skb_put_data(skb, &tg_rf_settings[rf], sizeof(struct port100_tg_rf_setting)); resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param) { struct port100 *dev = nfc_digital_get_drvdata(ddev); const struct port100_protocol *protocols; struct sk_buff *skb; struct sk_buff *resp; int rc; int num_protocols; size_t size; if (param >= NFC_DIGITAL_FRAMING_LAST) return -EINVAL; protocols = tg_protocols[param]; num_protocols = 0; while (protocols[num_protocols].number != PORT100_TG_PROT_END) num_protocols++; if (!num_protocols) return 0; size = sizeof(struct port100_protocol) * num_protocols; skb = port100_alloc_skb(dev, size); if (!skb) return -ENOMEM; skb_put_data(skb, protocols, size); resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { if (type == NFC_DIGITAL_CONFIG_RF_TECH) return port100_tg_set_rf(ddev, param); if (type == NFC_DIGITAL_CONFIG_FRAMING) return port100_tg_set_framing(ddev, param); return -EINVAL; } static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated) { u8 mask; switch (dev->cmd_type) { case PORT100_CMD_TYPE_0: mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK; break; case PORT100_CMD_TYPE_1: mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK | PORT100_MDAA_TGT_WAS_ACTIVATED_MASK; break; default: nfc_err(&dev->interface->dev, "Unknown command type\n"); return false; } return ((tgt_activated & mask) == mask); } static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg, struct sk_buff *resp) { u32 status; const struct port100_cb_arg *cb_arg = arg; nfc_digital_cmd_complete_t cb = cb_arg->complete_cb; struct port100_tg_comm_rf_res *hdr; if (IS_ERR(resp)) goto exit; hdr = (struct port100_tg_comm_rf_res *)resp->data; status = le32_to_cpu(hdr->status); if (cb_arg->mdaa && !port100_tg_target_activated(dev, hdr->target_activated)) { kfree_skb(resp); resp = ERR_PTR(-ETIMEDOUT); goto exit; } skb_pull(resp, sizeof(struct port100_tg_comm_rf_res)); if (status != PORT100_CMD_STATUS_OK) { kfree_skb(resp); if (status == PORT100_CMD_STATUS_TIMEOUT) resp = ERR_PTR(-ETIMEDOUT); else resp = ERR_PTR(-EIO); } exit: cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp); kfree(cb_arg); } static int port100_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct port100_tg_comm_rf_cmd *hdr; struct port100_cb_arg *cb_arg; cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL); if (!cb_arg) return -ENOMEM; cb_arg->complete_cb = cb; cb_arg->complete_arg = arg; skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd)); hdr = (struct port100_tg_comm_rf_cmd *)skb->data; memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd)); hdr->guard_time = cpu_to_le16(500); hdr->send_timeout = cpu_to_le16(0xFFFF); hdr->recv_timeout = cpu_to_le16(timeout); return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb, port100_tg_comm_rf_complete, cb_arg); } static int port100_listen_mdaa(struct nfc_digital_dev *ddev, struct digital_tg_mdaa_params *params, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct port100 *dev = 
nfc_digital_get_drvdata(ddev); struct port100_tg_comm_rf_cmd *hdr; struct port100_cb_arg *cb_arg; struct sk_buff *skb; int rc; rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_106A); if (rc) return rc; rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); if (rc) return rc; cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL); if (!cb_arg) return -ENOMEM; cb_arg->complete_cb = cb; cb_arg->complete_arg = arg; cb_arg->mdaa = 1; skb = port100_alloc_skb(dev, 0); if (!skb) { kfree(cb_arg); return -ENOMEM; } skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd)); hdr = (struct port100_tg_comm_rf_cmd *)skb->data; memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd)); hdr->guard_time = 0; hdr->send_timeout = cpu_to_le16(0xFFFF); hdr->mdaa = 1; hdr->nfca_param[0] = (params->sens_res >> 8) & 0xFF; hdr->nfca_param[1] = params->sens_res & 0xFF; memcpy(hdr->nfca_param + 2, params->nfcid1, 3); hdr->nfca_param[5] = params->sel_res; memcpy(hdr->nfcf_param, params->nfcid2, 8); hdr->nfcf_param[16] = (params->sc >> 8) & 0xFF; hdr->nfcf_param[17] = params->sc & 0xFF; hdr->recv_timeout = cpu_to_le16(timeout); return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb, port100_tg_comm_rf_complete, cb_arg); } static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { const struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb; skb = port100_alloc_skb(dev, 0); if (!skb) return -ENOMEM; return port100_tg_send_cmd(ddev, skb, timeout, cb, arg); } static const struct nfc_digital_ops port100_digital_ops = { .in_configure_hw = port100_in_configure_hw, .in_send_cmd = port100_in_send_cmd, .tg_listen_mdaa = port100_listen_mdaa, .tg_listen = port100_listen, .tg_configure_hw = port100_tg_configure_hw, .tg_send_cmd = port100_tg_send_cmd, .switch_rf = port100_switch_rf, .abort_cmd = port100_abort_cmd, }; static const struct usb_device_id port100_table[] = { { USB_DEVICE(SONY_VENDOR_ID, RCS380S_PRODUCT_ID), }, { USB_DEVICE(SONY_VENDOR_ID, RCS380P_PRODUCT_ID), }, { } }; MODULE_DEVICE_TABLE(usb, port100_table); static int port100_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct port100 *dev; int rc; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int in_endpoint; int out_endpoint; u16 fw_version; u64 cmd_type_mask; int i; dev = devm_kzalloc(&interface->dev, sizeof(struct port100), GFP_KERNEL); if (!dev) return -ENOMEM; mutex_init(&dev->out_urb_lock); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; usb_set_intfdata(interface, dev); in_endpoint = out_endpoint = 0; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) in_endpoint = endpoint->bEndpointAddress; if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) out_endpoint = endpoint->bEndpointAddress; } if (!in_endpoint || !out_endpoint) { nfc_err(&interface->dev, "Could not find bulk-in or bulk-out endpoint\n"); rc = -ENODEV; goto error; } dev->in_urb = usb_alloc_urb(0, GFP_KERNEL); dev->out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->in_urb || !dev->out_urb) { nfc_err(&interface->dev, "Could not allocate USB URBs\n"); rc = -ENOMEM; goto error; } usb_fill_bulk_urb(dev->in_urb, dev->udev, usb_rcvbulkpipe(dev->udev, in_endpoint), NULL, 0, NULL, dev); usb_fill_bulk_urb(dev->out_urb, 
dev->udev, usb_sndbulkpipe(dev->udev, out_endpoint), NULL, 0, port100_send_complete, dev); dev->out_urb->transfer_flags = URB_ZERO_PACKET; dev->skb_headroom = PORT100_FRAME_HEADER_LEN + PORT100_COMM_RF_HEAD_MAX_LEN; dev->skb_tailroom = PORT100_FRAME_TAIL_LEN; init_completion(&dev->cmd_cancel_done); INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete); /* The first thing to do with the Port-100 is to set the command type * to be used. If supported we use command type 1. 0 otherwise. */ cmd_type_mask = port100_get_command_type_mask(dev); if (!cmd_type_mask) { nfc_err(&interface->dev, "Could not get supported command types\n"); rc = -ENODEV; goto error; } if (PORT100_CMD_TYPE_IS_SUPPORTED(cmd_type_mask, PORT100_CMD_TYPE_1)) dev->cmd_type = PORT100_CMD_TYPE_1; else dev->cmd_type = PORT100_CMD_TYPE_0; rc = port100_set_command_type(dev, dev->cmd_type); if (rc) { nfc_err(&interface->dev, "The device does not support command type %u\n", dev->cmd_type); goto error; } fw_version = port100_get_firmware_version(dev); if (!fw_version) nfc_err(&interface->dev, "Could not get device firmware version\n"); nfc_info(&interface->dev, "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n", (fw_version & 0xFF00) >> 8, fw_version & 0xFF); dev->nfc_digital_dev = nfc_digital_allocate_device(&port100_digital_ops, PORT100_PROTOCOLS, PORT100_CAPABILITIES, dev->skb_headroom, dev->skb_tailroom); if (!dev->nfc_digital_dev) { nfc_err(&interface->dev, "Could not allocate nfc_digital_dev\n"); rc = -ENOMEM; goto error; } nfc_digital_set_parent_dev(dev->nfc_digital_dev, &interface->dev); nfc_digital_set_drvdata(dev->nfc_digital_dev, dev); rc = nfc_digital_register_device(dev->nfc_digital_dev); if (rc) { nfc_err(&interface->dev, "Could not register digital device\n"); goto free_nfc_dev; } return 0; free_nfc_dev: nfc_digital_free_device(dev->nfc_digital_dev); error: usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); return rc; } static void port100_disconnect(struct usb_interface *interface) { struct port100 *dev; dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); nfc_digital_unregister_device(dev->nfc_digital_dev); nfc_digital_free_device(dev->nfc_digital_dev); usb_kill_urb(dev->in_urb); usb_kill_urb(dev->out_urb); usb_free_urb(dev->in_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); kfree(dev->cmd); nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected\n"); } static struct usb_driver port100_driver = { .name = "port100", .probe = port100_probe, .disconnect = port100_disconnect, .id_table = port100_table, }; module_usb_driver(port100_driver); MODULE_DESCRIPTION("NFC Port-100 series usb driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); |
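/*
 * Illustrative sketch (not part of the driver above): the Port-100 frame
 * checksums follow the rule described in port100_checksum() and
 * port100_data_checksum(), i.e. the sum of the covered bytes plus the
 * checksum equals zero modulo 256. The standalone program below, with
 * hypothetical names (example_data_checksum, payload), simply verifies
 * that property for an arbitrary byte sequence.
 */
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Two's-complement negation of the byte sum, mirroring port100_data_checksum(). */
static uint8_t example_data_checksum(const uint8_t *data, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += data[i];
	return (uint8_t)(~sum + 1);
}

int main(void)
{
	const uint8_t payload[] = { 0xd6, 0x00, 0x01, 0x02 }; /* arbitrary example bytes */
	uint8_t dcs = example_data_checksum(payload, sizeof(payload));
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(payload); i++)
		sum += payload[i];
	/* The rule from the driver: data bytes + DCS == 0 (mod 256). */
	assert((uint8_t)(sum + dcs) == 0);
	return 0;
}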
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * V4L2 controls framework control definitions.
 *
 * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
 */

#include <linux/export.h>
#include <media/v4l2-ctrls.h>

/*
 * Returns NULL or a character pointer array containing the menu for
 * the given control ID. The pointer array ends with a NULL pointer.
 * An empty string signifies a menu entry that is invalid. This allows
 * drivers to disable certain options if they are not supported.
 */
const char * const *v4l2_ctrl_get_menu(u32 id)
{
	static const char * const mpeg_audio_sampling_freq[] = {
		"44.1 kHz", "48 kHz", "32 kHz", NULL
	};
	static const char * const mpeg_audio_encoding[] = {
		"MPEG-1/2 Layer I", "MPEG-1/2 Layer II", "MPEG-1/2 Layer III",
		"MPEG-2/4 AAC", "AC-3", NULL
	};
	static const char * const mpeg_audio_l1_bitrate[] = {
		"32 kbps", "64 kbps", "96 kbps", "128 kbps", "160 kbps",
		"192 kbps", "224 kbps", "256 kbps", "288 kbps", "320 kbps",
		"352 kbps", "384 kbps", "416 kbps", "448 kbps", NULL
	};
	static const char * const mpeg_audio_l2_bitrate[] = {
		"32 kbps", "48 kbps", "56 kbps", "64 kbps", "80 kbps",
		"96 kbps", "112 kbps", "128 kbps", "160 kbps", "192 kbps",
		"224 kbps", "256 kbps", "320 kbps", "384 kbps", NULL
	};
	static const char * const mpeg_audio_l3_bitrate[] = {
		"32 kbps", "40 kbps", "48 kbps", "56 kbps", "64 kbps",
		"80 kbps", "96 kbps", "112 kbps", "128 kbps", "160 kbps",
		"192 kbps", "224 kbps", "256 kbps", "320 kbps", NULL
	};
	static const char * const mpeg_audio_ac3_bitrate[] = {
		"32 kbps", "40 kbps", "48 kbps", "56 kbps", "64 kbps",
		"80 kbps", "96 kbps", "112 kbps", "128 kbps", "160 kbps",
		"192 kbps", "224 kbps", "256 kbps", "320 kbps", "384 kbps",
		"448 kbps", "512 kbps", "576 kbps", "640 kbps", NULL
	};
	static const char * const mpeg_audio_mode[] = {
		"Stereo", "Joint Stereo", "Dual", "Mono", NULL
	};
	static const char * const mpeg_audio_mode_extension[] = {
		"Bound 4", "Bound 8", "Bound 12", "Bound 16", NULL
	};
	static const char * const mpeg_audio_emphasis[] = {
		"No Emphasis", "50/15 us", "CCITT J17", NULL
	};
	static const char * const mpeg_audio_crc[] = {
		"No CRC", "16-bit CRC", NULL
	};
	static const char * const mpeg_audio_dec_playback[] = {
		"Auto", "Stereo", "Left", "Right", "Mono", "Swapped Stereo", NULL
	};
	static const char * const mpeg_video_encoding[] = {
		"MPEG-1", "MPEG-2", "MPEG-4 AVC", NULL
	};
	static const char * const mpeg_video_aspect[] = {
		"1x1", "4x3", "16x9", "2.21x1", NULL
	};
	static const char * const mpeg_video_bitrate_mode[] = {
		"Variable Bitrate", "Constant Bitrate", "Constant Quality", NULL
	};
	static const char * const mpeg_stream_type[] = {
		"MPEG-2 Program Stream", "MPEG-2 Transport Stream",
		"MPEG-1 System Stream", "MPEG-2 DVD-compatible Stream",
		"MPEG-1 VCD-compatible Stream", "MPEG-2 SVCD-compatible Stream",
		NULL
	};
	static const char * const mpeg_stream_vbi_fmt[] = {
		"No VBI", "Private Packet, IVTV Format", NULL
	};
	static const char * const camera_power_line_frequency[] = {
		"Disabled", "50 Hz", "60 Hz", "Auto", NULL
	};
	static const char * const camera_exposure_auto[] = {
		"Auto Mode", "Manual Mode", "Shutter Priority Mode",
		"Aperture Priority Mode", NULL
	};
	static const char * const camera_exposure_metering[] = {
		"Average", "Center Weighted", "Spot", "Matrix", NULL
	};
	static const char * const
camera_auto_focus_range[] = { "Auto", "Normal", "Macro", "Infinity", NULL }; static const char * const colorfx[] = { "None", "Black & White", "Sepia", "Negative", "Emboss", "Sketch", "Sky Blue", "Grass Green", "Skin Whiten", "Vivid", "Aqua", "Art Freeze", "Silhouette", "Solarization", "Antique", "Set Cb/Cr", NULL }; static const char * const auto_n_preset_white_balance[] = { "Manual", "Auto", "Incandescent", "Fluorescent", "Fluorescent H", "Horizon", "Daylight", "Flash", "Cloudy", "Shade", NULL, }; static const char * const camera_iso_sensitivity_auto[] = { "Manual", "Auto", NULL }; static const char * const scene_mode[] = { "None", "Backlight", "Beach/Snow", "Candle Light", "Dusk/Dawn", "Fall Colors", "Fireworks", "Landscape", "Night", "Party/Indoor", "Portrait", "Sports", "Sunset", "Text", NULL }; static const char * const tune_emphasis[] = { "None", "50 Microseconds", "75 Microseconds", NULL, }; static const char * const header_mode[] = { "Separate Buffer", "Joined With 1st Frame", NULL, }; static const char * const multi_slice[] = { "Single", "Max Macroblocks", "Max Bytes", NULL, }; static const char * const entropy_mode[] = { "CAVLC", "CABAC", NULL, }; static const char * const mpeg_h264_level[] = { "1", "1b", "1.1", "1.2", "1.3", "2", "2.1", "2.2", "3", "3.1", "3.2", "4", "4.1", "4.2", "5", "5.1", "5.2", "6.0", "6.1", "6.2", NULL, }; static const char * const h264_loop_filter[] = { "Enabled", "Disabled", "Disabled at Slice Boundary", NULL, }; static const char * const h264_profile[] = { "Baseline", "Constrained Baseline", "Main", "Extended", "High", "High 10", "High 422", "High 444 Predictive", "High 10 Intra", "High 422 Intra", "High 444 Intra", "CAVLC 444 Intra", "Scalable Baseline", "Scalable High", "Scalable High Intra", "Stereo High", "Multiview High", "Constrained High", NULL, }; static const char * const vui_sar_idc[] = { "Unspecified", "1:1", "12:11", "10:11", "16:11", "40:33", "24:11", "20:11", "32:11", "80:33", "18:11", "15:11", "64:33", "160:99", "4:3", "3:2", "2:1", "Extended SAR", NULL, }; static const char * const h264_fp_arrangement_type[] = { "Checkerboard", "Column", "Row", "Side by Side", "Top Bottom", "Temporal", NULL, }; static const char * const h264_fmo_map_type[] = { "Interleaved Slices", "Scattered Slices", "Foreground with Leftover", "Box Out", "Raster Scan", "Wipe Scan", "Explicit", NULL, }; static const char * const h264_decode_mode[] = { "Slice-Based", "Frame-Based", NULL, }; static const char * const h264_start_code[] = { "No Start Code", "Annex B Start Code", NULL, }; static const char * const h264_hierarchical_coding_type[] = { "Hier Coding B", "Hier Coding P", NULL, }; static const char * const mpeg_mpeg2_level[] = { "Low", "Main", "High 1440", "High", NULL, }; static const char * const mpeg2_profile[] = { "Simple", "Main", "SNR Scalable", "Spatially Scalable", "High", NULL, }; static const char * const mpeg_mpeg4_level[] = { "0", "0b", "1", "2", "3", "3b", "4", "5", NULL, }; static const char * const mpeg4_profile[] = { "Simple", "Advanced Simple", "Core", "Simple Scalable", "Advanced Coding Efficiency", NULL, }; static const char * const vpx_golden_frame_sel[] = { "Use Previous Frame", "Use Previous Specific Frame", NULL, }; static const char * const vp8_profile[] = { "0", "1", "2", "3", NULL, }; static const char * const vp9_profile[] = { "0", "1", "2", "3", NULL, }; static const char * const vp9_level[] = { "1", "1.1", "2", "2.1", "3", "3.1", "4", "4.1", "5", "5.1", "5.2", "6", "6.1", "6.2", NULL, }; static const char * const flash_led_mode[] = { 
"Off", "Flash", "Torch", NULL, }; static const char * const flash_strobe_source[] = { "Software", "External", NULL, }; static const char * const jpeg_chroma_subsampling[] = { "4:4:4", "4:2:2", "4:2:0", "4:1:1", "4:1:0", "Gray", NULL, }; static const char * const dv_tx_mode[] = { "DVI-D", "HDMI", NULL, }; static const char * const dv_rgb_range[] = { "Automatic", "RGB Limited Range (16-235)", "RGB Full Range (0-255)", NULL, }; static const char * const dv_it_content_type[] = { "Graphics", "Photo", "Cinema", "Game", "No IT Content", NULL, }; static const char * const detect_md_mode[] = { "Disabled", "Global", "Threshold Grid", "Region Grid", NULL, }; static const char * const av1_profile[] = { "Main", "High", "Professional", NULL, }; static const char * const av1_level[] = { "2.0", "2.1", "2.2", "2.3", "3.0", "3.1", "3.2", "3.3", "4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2", "6.3", "7.0", "7.1", "7.2", "7.3", NULL, }; static const char * const hevc_profile[] = { "Main", "Main Still Picture", "Main 10", NULL, }; static const char * const hevc_level[] = { "1", "2", "2.1", "3", "3.1", "4", "4.1", "5", "5.1", "5.2", "6", "6.1", "6.2", NULL, }; static const char * const hevc_hierarchial_coding_type[] = { "B", "P", NULL, }; static const char * const hevc_refresh_type[] = { "None", "CRA", "IDR", NULL, }; static const char * const hevc_size_of_length_field[] = { "0", "1", "2", "4", NULL, }; static const char * const hevc_tier[] = { "Main", "High", NULL, }; static const char * const hevc_loop_filter_mode[] = { "Disabled", "Enabled", "Disabled at slice boundary", "NULL", }; static const char * const hevc_decode_mode[] = { "Slice-Based", "Frame-Based", NULL, }; static const char * const hevc_start_code[] = { "No Start Code", "Annex B Start Code", NULL, }; static const char * const camera_orientation[] = { "Front", "Back", "External", NULL, }; static const char * const mpeg_video_frame_skip[] = { "Disabled", "Level Limit", "VBV/CPB Limit", NULL, }; static const char * const intra_refresh_period_type[] = { "Random", "Cyclic", NULL, }; switch (id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return mpeg_audio_sampling_freq; case V4L2_CID_MPEG_AUDIO_ENCODING: return mpeg_audio_encoding; case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return mpeg_audio_l1_bitrate; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return mpeg_audio_l2_bitrate; case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return mpeg_audio_l3_bitrate; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return mpeg_audio_ac3_bitrate; case V4L2_CID_MPEG_AUDIO_MODE: return mpeg_audio_mode; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return mpeg_audio_mode_extension; case V4L2_CID_MPEG_AUDIO_EMPHASIS: return mpeg_audio_emphasis; case V4L2_CID_MPEG_AUDIO_CRC: return mpeg_audio_crc; case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return mpeg_audio_dec_playback; case V4L2_CID_MPEG_VIDEO_ENCODING: return mpeg_video_encoding; case V4L2_CID_MPEG_VIDEO_ASPECT: return mpeg_video_aspect; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return mpeg_video_bitrate_mode; case V4L2_CID_MPEG_STREAM_TYPE: return mpeg_stream_type; case V4L2_CID_MPEG_STREAM_VBI_FMT: return mpeg_stream_vbi_fmt; case V4L2_CID_POWER_LINE_FREQUENCY: return camera_power_line_frequency; case V4L2_CID_EXPOSURE_AUTO: return camera_exposure_auto; case V4L2_CID_EXPOSURE_METERING: return camera_exposure_metering; case V4L2_CID_AUTO_FOCUS_RANGE: return camera_auto_focus_range; case V4L2_CID_COLORFX: return colorfx; case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return 
auto_n_preset_white_balance; case V4L2_CID_ISO_SENSITIVITY_AUTO: return camera_iso_sensitivity_auto; case V4L2_CID_SCENE_MODE: return scene_mode; case V4L2_CID_TUNE_PREEMPHASIS: return tune_emphasis; case V4L2_CID_TUNE_DEEMPHASIS: return tune_emphasis; case V4L2_CID_FLASH_LED_MODE: return flash_led_mode; case V4L2_CID_FLASH_STROBE_SOURCE: return flash_strobe_source; case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return header_mode; case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return mpeg_video_frame_skip; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return multi_slice; case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return entropy_mode; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return mpeg_h264_level; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return h264_loop_filter; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return h264_profile; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return vui_sar_idc; case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return h264_fp_arrangement_type; case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return h264_fmo_map_type; case V4L2_CID_STATELESS_H264_DECODE_MODE: return h264_decode_mode; case V4L2_CID_STATELESS_H264_START_CODE: return h264_start_code; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return h264_hierarchical_coding_type; case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return mpeg_mpeg2_level; case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return mpeg2_profile; case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return mpeg_mpeg4_level; case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return mpeg4_profile; case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return vpx_golden_frame_sel; case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return vp8_profile; case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return vp9_profile; case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return vp9_level; case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return jpeg_chroma_subsampling; case V4L2_CID_DV_TX_MODE: return dv_tx_mode; case V4L2_CID_DV_TX_RGB_RANGE: case V4L2_CID_DV_RX_RGB_RANGE: return dv_rgb_range; case V4L2_CID_DV_TX_IT_CONTENT_TYPE: case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return dv_it_content_type; case V4L2_CID_DETECT_MD_MODE: return detect_md_mode; case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return hevc_profile; case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return hevc_level; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return hevc_hierarchial_coding_type; case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return hevc_refresh_type; case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return hevc_size_of_length_field; case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return hevc_tier; case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return hevc_loop_filter_mode; case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return av1_profile; case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return av1_level; case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return hevc_decode_mode; case V4L2_CID_STATELESS_HEVC_START_CODE: return hevc_start_code; case V4L2_CID_CAMERA_ORIENTATION: return camera_orientation; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return intra_refresh_period_type; default: return NULL; } } EXPORT_SYMBOL(v4l2_ctrl_get_menu); #define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); }) /* * Returns NULL or an s64 type array containing the menu for given * control ID. The total number of the menu items is returned in @len. 
*/ const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) { static const s64 qmenu_int_vpx_num_partitions[] = { 1, 2, 4, 8, }; static const s64 qmenu_int_vpx_num_ref_frames[] = { 1, 2, 3, }; switch (id) { case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len); case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len); default: *len = 0; return NULL; } } EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); /* Return the control name. */ const char *v4l2_ctrl_get_name(u32 id) { switch (id) { /* USER controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_USER_CLASS: return "User Controls"; case V4L2_CID_BRIGHTNESS: return "Brightness"; case V4L2_CID_CONTRAST: return "Contrast"; case V4L2_CID_SATURATION: return "Saturation"; case V4L2_CID_HUE: return "Hue"; case V4L2_CID_AUDIO_VOLUME: return "Volume"; case V4L2_CID_AUDIO_BALANCE: return "Balance"; case V4L2_CID_AUDIO_BASS: return "Bass"; case V4L2_CID_AUDIO_TREBLE: return "Treble"; case V4L2_CID_AUDIO_MUTE: return "Mute"; case V4L2_CID_AUDIO_LOUDNESS: return "Loudness"; case V4L2_CID_BLACK_LEVEL: return "Black Level"; case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic"; case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance"; case V4L2_CID_RED_BALANCE: return "Red Balance"; case V4L2_CID_BLUE_BALANCE: return "Blue Balance"; case V4L2_CID_GAMMA: return "Gamma"; case V4L2_CID_EXPOSURE: return "Exposure"; case V4L2_CID_AUTOGAIN: return "Gain, Automatic"; case V4L2_CID_GAIN: return "Gain"; case V4L2_CID_HFLIP: return "Horizontal Flip"; case V4L2_CID_VFLIP: return "Vertical Flip"; case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency"; case V4L2_CID_HUE_AUTO: return "Hue, Automatic"; case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature"; case V4L2_CID_SHARPNESS: return "Sharpness"; case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation"; case V4L2_CID_CHROMA_AGC: return "Chroma AGC"; case V4L2_CID_COLOR_KILLER: return "Color Killer"; case V4L2_CID_COLORFX: return "Color Effects"; case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic"; case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter"; case V4L2_CID_ROTATE: return "Rotate"; case V4L2_CID_BG_COLOR: return "Background Color"; case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers"; case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers"; case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr"; case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB"; /* * Codec controls * * The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical. * * Keep the order of the 'case's the same as in videodev2.h! 
*/ case V4L2_CID_CODEC_CLASS: return "Codec Controls"; case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type"; case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID"; case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID"; case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID"; case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID"; case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID"; case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID"; case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format"; case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency"; case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding"; case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate"; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate"; case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate"; case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode"; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension"; case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis"; case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC"; case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute"; case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate"; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate"; case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback"; case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback"; case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding"; case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect"; case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames"; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size"; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure"; case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown"; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode"; case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality"; case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate"; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate"; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation"; case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute"; case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type"; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period"; case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics"; case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode"; case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay"; case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable"; case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters"; case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value"; case 
V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode"; case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period"; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode"; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile"; case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR"; case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR"; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI"; case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0"; case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type"; case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering"; case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO"; case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups"; case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change"; case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp"; case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. 
of Consecutive MBs"; case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering"; case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP: return "H264 Set QP Value for HC Layers"; case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: return "H264 Constrained Intra Pred"; case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate"; case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level"; case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile"; case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice"; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice"; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method"; case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS"; case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count"; case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color"; case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control"; case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range"; case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base 
Layer Priority ID"; case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count"; case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index"; case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames"; case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: return "Average QP Value"; case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value"; case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value"; /* VPX controls */ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable"; case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame"; case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range"; case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control"; case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period"; case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator"; case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile"; case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile"; case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level"; /* HEVC controls */ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile"; case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level"; case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier"; case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution"; case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth"; case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type"; case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction"; case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding"; case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront"; case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP"; case 
V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB"; case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID"; case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing"; case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split"; case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction"; case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs"; case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode"; case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR"; case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset"; case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset"; case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field"; case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame"; case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR"; /* AV1 controls */ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile"; case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level"; /* CAMERA controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_CAMERA_CLASS: return "Camera Controls"; case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure"; case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute"; case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate"; case V4L2_CID_PAN_RELATIVE: return "Pan, Relative"; case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative"; case V4L2_CID_PAN_RESET: return "Pan, Reset"; case V4L2_CID_TILT_RESET: return "Tilt, Reset"; case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute"; case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute"; case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute"; case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative"; case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous"; case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute"; case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative"; case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous"; case V4L2_CID_PRIVACY: return "Privacy"; case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute"; case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative"; case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias"; case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset"; case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range"; case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization"; case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity"; case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto"; case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode"; case V4L2_CID_SCENE_MODE: return "Scene Mode"; case V4L2_CID_3A_LOCK: return "3A Lock"; case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start"; case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop"; case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status"; case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range"; case V4L2_CID_PAN_SPEED: return "Pan, Speed"; case V4L2_CID_TILT_SPEED: return "Tilt, Speed"; case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size"; case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation"; case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation"; case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode"; /* FM Radio Modulator controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls"; case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation"; case V4L2_CID_RDS_TX_PI: return "RDS Program ID"; case V4L2_CID_RDS_TX_PTY: return "RDS Program Type"; case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name"; case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text"; case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo"; case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head"; case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed"; case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY"; case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music"; case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies"; case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies"; case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled"; case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time"; case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis"; case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; /* Flash controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_FLASH_CLASS: return "Flash Controls"; case V4L2_CID_FLASH_LED_MODE: return "LED Mode"; case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source"; case V4L2_CID_FLASH_STROBE: return "Strobe"; case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe"; case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status"; case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout"; case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode"; case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode"; case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator"; case V4L2_CID_FLASH_FAULT: return "Faults"; case V4L2_CID_FLASH_CHARGE: return "Charge"; case V4L2_CID_FLASH_READY: return "Ready to Strobe"; /* JPEG encoder controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls"; case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling"; case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval"; case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality"; case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers"; /* Image source controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls"; case V4L2_CID_VBLANK: return "Vertical Blanking"; case V4L2_CID_HBLANK: return "Horizontal Blanking"; case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain"; case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value"; case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value"; case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value"; case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value"; case V4L2_CID_NOTIFY_GAINS: return "Notify Gains"; /* Image processing controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls"; case V4L2_CID_LINK_FREQ: return "Link Frequency"; case V4L2_CID_PIXEL_RATE: return "Pixel Rate"; case V4L2_CID_TEST_PATTERN: return "Test Pattern"; case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode"; case V4L2_CID_DIGITAL_GAIN: return "Digital Gain"; /* DV controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_DV_CLASS: return "Digital Video Controls"; case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present"; case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present"; case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present"; case V4L2_CID_DV_TX_MODE: return "Transmit Mode"; case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range"; case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type"; case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present"; case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range"; case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type"; case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls"; case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis"; case V4L2_CID_RDS_RECEPTION: return "RDS Reception"; case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls"; case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain"; case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto"; case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain"; case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto"; case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain"; case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto"; case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain"; case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto"; case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth"; case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock"; case V4L2_CID_RDS_RX_PTY: return "RDS Program Type"; case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name"; case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text"; case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music"; /* Detection controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_DETECT_CLASS: return "Detection Controls"; case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode"; case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold"; case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid"; case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid"; /* Stateless Codec controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls"; case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode"; case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code"; case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set"; case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set"; case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix"; case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table"; case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters"; case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters"; case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters"; case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters"; case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header"; case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header"; case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices"; case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates"; case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters"; case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set"; case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set"; case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters"; case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix"; case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters"; case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode"; case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code"; case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets"; case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters"; case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry"; case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters"; case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain"; /* Colorimetry controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls"; case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info"; case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display"; default: return NULL; } } EXPORT_SYMBOL(v4l2_ctrl_get_name); void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags) { *name = v4l2_ctrl_get_name(id); *flags = 0; switch (id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_LOUDNESS: case V4L2_CID_AUTO_WHITE_BALANCE: case V4L2_CID_AUTOGAIN: case V4L2_CID_HFLIP: case V4L2_CID_VFLIP: case V4L2_CID_HUE_AUTO: case V4L2_CID_CHROMA_AGC: case V4L2_CID_COLOR_KILLER: case V4L2_CID_AUTOBRIGHTNESS: case V4L2_CID_MPEG_AUDIO_MUTE: case V4L2_CID_MPEG_VIDEO_MUTE: case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: case V4L2_CID_MPEG_VIDEO_PULLDOWN: case V4L2_CID_EXPOSURE_AUTO_PRIORITY: case V4L2_CID_FOCUS_AUTO: case V4L2_CID_PRIVACY: case V4L2_CID_AUDIO_LIMITER_ENABLED: case V4L2_CID_AUDIO_COMPRESSION_ENABLED: case V4L2_CID_PILOT_TONE_ENABLED: case V4L2_CID_ILLUMINATORS_1: case V4L2_CID_ILLUMINATORS_2: case V4L2_CID_FLASH_STROBE_STATUS: case V4L2_CID_FLASH_CHARGE: case V4L2_CID_FLASH_READY: case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: case V4L2_CID_WIDE_DYNAMIC_RANGE: case V4L2_CID_IMAGE_STABILIZATION: case V4L2_CID_RDS_RECEPTION: case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_PLL_LOCK: case V4L2_CID_RDS_TX_MONO_STEREO: case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: case V4L2_CID_RDS_TX_COMPRESSED: case V4L2_CID_RDS_TX_DYNAMIC_PTY: case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: case V4L2_CID_RDS_TX_MUSIC_SPEECH: case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: case V4L2_CID_RDS_RX_MUSIC_SPEECH: *type = V4L2_CTRL_TYPE_BOOLEAN; *min = 0; *max = *step = 1; break; case V4L2_CID_ROTATE: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; break; case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: *type = V4L2_CTRL_TYPE_INTEGER; break; case V4L2_CID_MPEG_VIDEO_LTR_COUNT: *type = V4L2_CTRL_TYPE_INTEGER; break; case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; break; case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: *type = V4L2_CTRL_TYPE_BITMASK; *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; break; case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: case V4L2_CID_PAN_RESET: case V4L2_CID_TILT_RESET: case V4L2_CID_FLASH_STROBE: case V4L2_CID_FLASH_STROBE_STOP: case V4L2_CID_AUTO_FOCUS_START: case V4L2_CID_AUTO_FOCUS_STOP: case V4L2_CID_DO_WHITE_BALANCE: *type = V4L2_CTRL_TYPE_BUTTON; *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; *min = *max = *step = *def = 0; break; case V4L2_CID_POWER_LINE_FREQUENCY: case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: case 
V4L2_CID_MPEG_AUDIO_ENCODING: case V4L2_CID_MPEG_AUDIO_L1_BITRATE: case V4L2_CID_MPEG_AUDIO_L2_BITRATE: case V4L2_CID_MPEG_AUDIO_L3_BITRATE: case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: case V4L2_CID_MPEG_AUDIO_MODE: case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: case V4L2_CID_MPEG_AUDIO_EMPHASIS: case V4L2_CID_MPEG_AUDIO_CRC: case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: case V4L2_CID_MPEG_VIDEO_ENCODING: case V4L2_CID_MPEG_VIDEO_ASPECT: case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: case V4L2_CID_MPEG_STREAM_TYPE: case V4L2_CID_MPEG_STREAM_VBI_FMT: case V4L2_CID_EXPOSURE_AUTO: case V4L2_CID_AUTO_FOCUS_RANGE: case V4L2_CID_COLORFX: case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: case V4L2_CID_TUNE_PREEMPHASIS: case V4L2_CID_FLASH_LED_MODE: case V4L2_CID_FLASH_STROBE_SOURCE: case V4L2_CID_MPEG_VIDEO_HEADER_MODE: case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: case V4L2_CID_MPEG_VIDEO_H264_LEVEL: case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: case V4L2_CID_MPEG_VIDEO_H264_PROFILE: case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: case V4L2_CID_ISO_SENSITIVITY_AUTO: case V4L2_CID_EXPOSURE_METERING: case V4L2_CID_SCENE_MODE: case V4L2_CID_DV_TX_MODE: case V4L2_CID_DV_TX_RGB_RANGE: case V4L2_CID_DV_TX_IT_CONTENT_TYPE: case V4L2_CID_DV_RX_RGB_RANGE: case V4L2_CID_DV_RX_IT_CONTENT_TYPE: case V4L2_CID_TEST_PATTERN: case V4L2_CID_DEINTERLACING_MODE: case V4L2_CID_TUNE_DEEMPHASIS: case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: case V4L2_CID_DETECT_MD_MODE: case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: case V4L2_CID_MPEG_VIDEO_HEVC_TIER: case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: case V4L2_CID_STATELESS_HEVC_DECODE_MODE: case V4L2_CID_STATELESS_HEVC_START_CODE: case V4L2_CID_STATELESS_H264_DECODE_MODE: case V4L2_CID_STATELESS_H264_START_CODE: case V4L2_CID_CAMERA_ORIENTATION: case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: case V4L2_CID_HDR_SENSOR_MODE: *type = V4L2_CTRL_TYPE_MENU; break; case V4L2_CID_LINK_FREQ: *type = V4L2_CTRL_TYPE_INTEGER_MENU; break; case V4L2_CID_RDS_TX_PS_NAME: case V4L2_CID_RDS_TX_RADIO_TEXT: case V4L2_CID_RDS_RX_PS_NAME: case V4L2_CID_RDS_RX_RADIO_TEXT: *type = V4L2_CTRL_TYPE_STRING; break; case V4L2_CID_ISO_SENSITIVITY: case V4L2_CID_AUTO_EXPOSURE_BIAS: case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: *type = V4L2_CTRL_TYPE_INTEGER_MENU; break; case V4L2_CID_USER_CLASS: case V4L2_CID_CAMERA_CLASS: case V4L2_CID_CODEC_CLASS: case V4L2_CID_FM_TX_CLASS: case V4L2_CID_FLASH_CLASS: case V4L2_CID_JPEG_CLASS: case V4L2_CID_IMAGE_SOURCE_CLASS: case V4L2_CID_IMAGE_PROC_CLASS: case V4L2_CID_DV_CLASS: case V4L2_CID_FM_RX_CLASS: case V4L2_CID_RF_TUNER_CLASS: case V4L2_CID_DETECT_CLASS: case V4L2_CID_CODEC_STATELESS_CLASS: case 
V4L2_CID_COLORIMETRY_CLASS: *type = V4L2_CTRL_TYPE_CTRL_CLASS; /* You can neither read nor write these */ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; *min = *max = *step = *def = 0; break; case V4L2_CID_BG_COLOR: case V4L2_CID_COLORFX_RGB: *type = V4L2_CTRL_TYPE_INTEGER; *step = 1; *min = 0; /* Max is calculated as RGB888 that is 2^24 - 1 */ *max = 0xffffff; break; case V4L2_CID_COLORFX_CBCR: *type = V4L2_CTRL_TYPE_INTEGER; *step = 1; *min = 0; *max = 0xffff; break; case V4L2_CID_FLASH_FAULT: case V4L2_CID_JPEG_ACTIVE_MARKER: case V4L2_CID_3A_LOCK: case V4L2_CID_AUTO_FOCUS_STATUS: case V4L2_CID_DV_TX_HOTPLUG: case V4L2_CID_DV_TX_RXSENSE: case V4L2_CID_DV_TX_EDID_PRESENT: case V4L2_CID_DV_RX_POWER_PRESENT: *type = V4L2_CTRL_TYPE_BITMASK; break; case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_MPEG_VIDEO_DEC_PTS: *type = V4L2_CTRL_TYPE_INTEGER64; *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; *min = *def = 0; *max = 0x1ffffffffLL; *step = 1; break; case V4L2_CID_MPEG_VIDEO_DEC_FRAME: *type = V4L2_CTRL_TYPE_INTEGER64; *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; *min = *def = 0; *max = 0x7fffffffffffffffLL; *step = 1; break; case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: *type = V4L2_CTRL_TYPE_INTEGER64; *min = 0; /* default for 8 bit black, luma is 16, chroma is 128 */ *def = 0x8000800010LL; *max = 0xffffffffffffLL; *step = 1; break; case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_PIXEL_RATE: *type = V4L2_CTRL_TYPE_INTEGER64; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_DETECT_MD_REGION_GRID: *type = V4L2_CTRL_TYPE_U8; break; case V4L2_CID_DETECT_MD_THRESHOLD_GRID: *type = V4L2_CTRL_TYPE_U16; break; case V4L2_CID_RDS_TX_ALT_FREQS: *type = V4L2_CTRL_TYPE_U32; break; case V4L2_CID_STATELESS_MPEG2_SEQUENCE: *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE; break; case V4L2_CID_STATELESS_MPEG2_PICTURE: *type = V4L2_CTRL_TYPE_MPEG2_PICTURE; break; case V4L2_CID_STATELESS_MPEG2_QUANTISATION: *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION; break; case V4L2_CID_STATELESS_FWHT_PARAMS: *type = V4L2_CTRL_TYPE_FWHT_PARAMS; break; case V4L2_CID_STATELESS_H264_SPS: *type = V4L2_CTRL_TYPE_H264_SPS; break; case V4L2_CID_STATELESS_H264_PPS: *type = V4L2_CTRL_TYPE_H264_PPS; break; case V4L2_CID_STATELESS_H264_SCALING_MATRIX: *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX; break; case V4L2_CID_STATELESS_H264_SLICE_PARAMS: *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS; break; case V4L2_CID_STATELESS_H264_DECODE_PARAMS: *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS; break; case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS; break; case V4L2_CID_STATELESS_VP8_FRAME: *type = V4L2_CTRL_TYPE_VP8_FRAME; break; case V4L2_CID_STATELESS_HEVC_SPS: *type = V4L2_CTRL_TYPE_HEVC_SPS; break; case V4L2_CID_STATELESS_HEVC_PPS: *type = V4L2_CTRL_TYPE_HEVC_PPS; break; case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS; *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; break; case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX; break; case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS; break; case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: *type = V4L2_CTRL_TYPE_U32; *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; break; case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: *type = 
V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR; break; case V4L2_CID_STATELESS_VP9_FRAME: *type = V4L2_CTRL_TYPE_VP9_FRAME; break; case V4L2_CID_STATELESS_AV1_SEQUENCE: *type = V4L2_CTRL_TYPE_AV1_SEQUENCE; break; case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY; *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; break; case V4L2_CID_STATELESS_AV1_FRAME: *type = V4L2_CTRL_TYPE_AV1_FRAME; break; case V4L2_CID_STATELESS_AV1_FILM_GRAIN: *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN; break; case V4L2_CID_UNIT_CELL_SIZE: *type = V4L2_CTRL_TYPE_AREA; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO; break; case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY; break; default: *type = V4L2_CTRL_TYPE_INTEGER; break; } switch (id) { case V4L2_CID_MPEG_AUDIO_ENCODING: case V4L2_CID_MPEG_AUDIO_MODE: case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: case V4L2_CID_MPEG_VIDEO_B_FRAMES: case V4L2_CID_MPEG_STREAM_TYPE: *flags |= V4L2_CTRL_FLAG_UPDATE; break; case V4L2_CID_AUDIO_VOLUME: case V4L2_CID_AUDIO_BALANCE: case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: case V4L2_CID_BRIGHTNESS: case V4L2_CID_CONTRAST: case V4L2_CID_SATURATION: case V4L2_CID_HUE: case V4L2_CID_RED_BALANCE: case V4L2_CID_BLUE_BALANCE: case V4L2_CID_GAMMA: case V4L2_CID_SHARPNESS: case V4L2_CID_CHROMA_GAIN: case V4L2_CID_RDS_TX_DEVIATION: case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: case V4L2_CID_AUDIO_LIMITER_DEVIATION: case V4L2_CID_AUDIO_COMPRESSION_GAIN: case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: case V4L2_CID_PILOT_TONE_DEVIATION: case V4L2_CID_PILOT_TONE_FREQUENCY: case V4L2_CID_TUNE_POWER_LEVEL: case V4L2_CID_TUNE_ANTENNA_CAPACITOR: case V4L2_CID_RF_TUNER_RF_GAIN: case V4L2_CID_RF_TUNER_LNA_GAIN: case V4L2_CID_RF_TUNER_MIXER_GAIN: case V4L2_CID_RF_TUNER_IF_GAIN: case V4L2_CID_RF_TUNER_BANDWIDTH: case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: *flags |= V4L2_CTRL_FLAG_SLIDER; break; case V4L2_CID_PAN_RELATIVE: case V4L2_CID_TILT_RELATIVE: case V4L2_CID_FOCUS_RELATIVE: case V4L2_CID_IRIS_RELATIVE: case V4L2_CID_ZOOM_RELATIVE: *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; break; case V4L2_CID_FLASH_STROBE_STATUS: case V4L2_CID_AUTO_FOCUS_STATUS: case V4L2_CID_FLASH_READY: case V4L2_CID_DV_TX_HOTPLUG: case V4L2_CID_DV_TX_RXSENSE: case V4L2_CID_DV_TX_EDID_PRESENT: case V4L2_CID_DV_RX_POWER_PRESENT: case V4L2_CID_DV_RX_IT_CONTENT_TYPE: case V4L2_CID_RDS_RX_PTY: case V4L2_CID_RDS_RX_PS_NAME: case V4L2_CID_RDS_RX_RADIO_TEXT: case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: case V4L2_CID_RDS_RX_MUSIC_SPEECH: case V4L2_CID_CAMERA_ORIENTATION: case V4L2_CID_CAMERA_SENSOR_ROTATION: *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_RF_TUNER_PLL_LOCK: *flags |= V4L2_CTRL_FLAG_VOLATILE; break; } } EXPORT_SYMBOL(v4l2_ctrl_fill); |
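/*
 * Illustrative sketch, not taken from the file above: one way a driver-side
 * debug helper could combine v4l2_ctrl_get_name(), v4l2_ctrl_fill() and
 * v4l2_ctrl_get_menu() -- all defined above and declared in
 * <media/v4l2-ctrls.h> -- to dump the entries of a menu control.  The helper
 * name dump_menu_ctrl() is hypothetical.  The standard menus are
 * NULL-terminated, so the loop simply walks until the sentinel.
 */
#include <linux/printk.h>
#include <media/v4l2-ctrls.h>

static void dump_menu_ctrl(u32 id)
{
        const char *name;
        const char * const *qmenu;
        enum v4l2_ctrl_type type;
        s64 min, max, def;
        u64 step;
        u32 flags;
        unsigned int i;

        /* Fill in the generic properties for this control ID. */
        v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
        if (type != V4L2_CTRL_TYPE_MENU)
                return;

        /* NULL means the core has no menu for this ID. */
        qmenu = v4l2_ctrl_get_menu(id);
        if (!qmenu)
                return;

        for (i = 0; qmenu[i]; i++)
                pr_info("%s: item %u = '%s'\n", name, i, qmenu[i]);
}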
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Supervisor Mode Access Prevention support
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: H. Peter Anvin <hpa@linux.intel.com>
 */

#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H

#include <asm/nops.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#ifdef __ASSEMBLER__

#define ASM_CLAC \
        ALTERNATIVE "", "clac", X86_FEATURE_SMAP
#define ASM_STAC \
        ALTERNATIVE "", "stac", X86_FEATURE_SMAP

#else /* __ASSEMBLER__ */

/*
 * The CLAC/STAC instructions toggle the enforcement of
 * X86_FEATURE_SMAP along with X86_FEATURE_LASS.
 *
 * SMAP enforcement is based on the _PAGE_BIT_USER bit in the page
 * tables. The kernel is not allowed to touch pages with that bit set
 * unless the AC bit is set.
 *
 * Use stac()/clac() when accessing userspace (_PAGE_USER) mappings,
 * regardless of location.
 *
 * Note: a barrier is implicit in alternative().
 */

static __always_inline void clac(void)
{
        alternative("", "clac", X86_FEATURE_SMAP);
}

static __always_inline void stac(void)
{
        alternative("", "stac", X86_FEATURE_SMAP);
}

/*
 * LASS enforcement is based on bit 63 of the virtual address. The
 * kernel is not allowed to touch memory in the lower half of the
 * virtual address space.
 *
 * Use lass_stac()/lass_clac() to toggle the AC bit for kernel data
 * accesses (!_PAGE_USER) that are blocked by LASS, but not by SMAP.
 *
 * Even with the AC bit set, LASS will continue to block instruction
 * fetches from the user half of the address space. To allow those,
 * clear CR4.LASS to disable the LASS mechanism entirely.
 *
 * Note: a barrier is implicit in alternative().
 */

static __always_inline void lass_clac(void)
{
        alternative("", "clac", X86_FEATURE_LASS);
}

static __always_inline void lass_stac(void)
{
        alternative("", "stac", X86_FEATURE_LASS);
}

static __always_inline unsigned long smap_save(void)
{
        unsigned long flags;

        asm volatile ("# smap_save\n\t"
                      ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "",
                                  "pushf; pop %0; clac", X86_FEATURE_SMAP)
                      : "=rm" (flags) : : "memory", "cc");

        return flags;
}

static __always_inline void smap_restore(unsigned long flags)
{
        asm volatile ("# smap_restore\n\t"
                      ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "",
                                  "push %0; popf", X86_FEATURE_SMAP)
                      : : "g" (flags) : "memory", "cc");
}

/* These macros can be used in asm() statements */
#define ASM_CLAC \
        ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
        ALTERNATIVE("", "stac", X86_FEATURE_SMAP)

#define ASM_CLAC_UNSAFE \
        ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "clac", X86_FEATURE_SMAP)
#define ASM_STAC_UNSAFE \
        ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "stac", X86_FEATURE_SMAP)

#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SMAP_H */
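/*
 * Minimal sketch of the usage pattern the comments above describe: a
 * userspace (_PAGE_USER) access bracketed by stac()/clac().  This is not
 * code from the header.  Real kernel code reaches STAC/CLAC through the
 * uaccess helpers (get_user(), copy_from_user(), ...), which also do
 * access_ok() and fault handling that this sketch deliberately omits;
 * the helper name read_user_byte() is hypothetical.
 */
#include <linux/compiler.h>
#include <asm/smap.h>

static __always_inline unsigned char read_user_byte(const unsigned char __user *p)
{
        unsigned char c;

        stac();         /* AC=1: SMAP permits the user-mapping access */
        c = *(const unsigned char __force *)p;
        clac();         /* AC=0: re-enable SMAP enforcement */

        return c;
}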
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Line 6 Linux USB driver
 *
 * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at)
 */

#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "capture.h"
#include "driver.h"
#include "pcm.h"
#include "playback.h"

/* Software stereo volume control. */
static void change_volume(struct urb *urb_out, int volume[],
                          int bytes_per_frame)
{
        int chn = 0;

        if (volume[0] == 256 && volume[1] == 256)
                return;         /* maximum volume - no change */

        if (bytes_per_frame == 4) {
                __le16 *p, *buf_end;

                p = (__le16 *)urb_out->transfer_buffer;
                buf_end = p + urb_out->transfer_buffer_length / sizeof(*p);

                for (; p < buf_end; ++p) {
                        short pv = le16_to_cpu(*p);
                        int val = (pv * volume[chn & 1]) >> 8;

                        pv = clamp(val, -0x8000, 0x7fff);
                        *p = cpu_to_le16(pv);
                        ++chn;
                }
        } else if (bytes_per_frame == 6) {
                unsigned char *p, *buf_end;

                p = (unsigned char *)urb_out->transfer_buffer;
                buf_end = p + urb_out->transfer_buffer_length;

                for (; p < buf_end; p += 3) {
                        int val;

                        val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16);
                        val = (val * volume[chn & 1]) >> 8;
                        val = clamp(val, -0x800000, 0x7fffff);
                        p[0] = val;
                        p[1] = val >> 8;
                        p[2] = val >> 16;
                        ++chn;
                }
        }
}

/* Create signal for impulse response test.
*/ static void create_impulse_test_signal(struct snd_line6_pcm *line6pcm, struct urb *urb_out, int bytes_per_frame) { int frames = urb_out->transfer_buffer_length / bytes_per_frame; if (bytes_per_frame == 4) { int i; short *pi = (short *)line6pcm->prev_fbuf; short *po = (short *)urb_out->transfer_buffer; for (i = 0; i < frames; ++i) { po[0] = pi[0]; po[1] = 0; pi += 2; po += 2; } } else if (bytes_per_frame == 6) { int i, j; unsigned char *pi = line6pcm->prev_fbuf; unsigned char *po = urb_out->transfer_buffer; for (i = 0; i < frames; ++i) { for (j = 0; j < bytes_per_frame / 2; ++j) po[j] = pi[j]; for (; j < bytes_per_frame; ++j) po[j] = 0; pi += bytes_per_frame; po += bytes_per_frame; } } if (--line6pcm->impulse_count <= 0) { ((unsigned char *)(urb_out->transfer_buffer))[bytes_per_frame - 1] = line6pcm->impulse_volume; line6pcm->impulse_count = line6pcm->impulse_period; } } /* Add signal to buffer for software monitoring. */ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal, int volume, int bytes_per_frame) { if (volume == 0) return; /* zero volume - no change */ if (bytes_per_frame == 4) { __le16 *pi, *po, *buf_end; pi = (__le16 *)signal; po = (__le16 *)urb_out->transfer_buffer; buf_end = po + urb_out->transfer_buffer_length / sizeof(*po); for (; po < buf_end; ++pi, ++po) { short pov = le16_to_cpu(*po); short piv = le16_to_cpu(*pi); int val = pov + ((piv * volume) >> 8); pov = clamp(val, -0x8000, 0x7fff); *po = cpu_to_le16(pov); } } /* We don't need to handle devices with 6 bytes per frame here since they all support hardware monitoring. */ } /* Find a free URB, prepare audio data, and submit URB. must be called in line6pcm->out.lock context */ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm) { int index; int i, urb_size, urb_frames; int ret; const int bytes_per_frame = line6pcm->properties->bytes_per_channel * line6pcm->properties->playback_hw.channels_max; const int frame_increment = line6pcm->properties->rates.rats[0].num_min; const int frame_factor = line6pcm->properties->rates.rats[0].den * (line6pcm->line6->intervals_per_second / LINE6_ISO_INTERVAL); struct urb *urb_out; index = find_first_zero_bit(&line6pcm->out.active_urbs, line6pcm->line6->iso_buffers); if (index < 0 || index >= line6pcm->line6->iso_buffers) { dev_err(line6pcm->line6->ifcdev, "no free URB found\n"); return -EINVAL; } urb_out = line6pcm->out.urbs[index]; urb_size = 0; /* TODO: this may not work for LINE6_ISO_PACKETS != 1 */ for (i = 0; i < LINE6_ISO_PACKETS; ++i) { /* compute frame size for given sampling rate */ int fsize = 0; struct usb_iso_packet_descriptor *fout = &urb_out->iso_frame_desc[i]; fsize = line6pcm->prev_fsize; if (fsize == 0) { int n; line6pcm->out.count += frame_increment; n = line6pcm->out.count / frame_factor; line6pcm->out.count -= n * frame_factor; fsize = n; } fsize *= bytes_per_frame; fout->offset = urb_size; fout->length = fsize; urb_size += fsize; } if (urb_size == 0) { /* can't determine URB size */ dev_err(line6pcm->line6->ifcdev, "driver bug: urb_size = 0\n"); return -EINVAL; } urb_frames = urb_size / bytes_per_frame; urb_out->transfer_buffer = line6pcm->out.buffer + index * LINE6_ISO_PACKETS * line6pcm->max_packet_size_out; urb_out->transfer_buffer_length = urb_size; urb_out->context = line6pcm; if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running) && !test_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags)) { struct snd_pcm_runtime *runtime = get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime; if (line6pcm->out.pos + urb_frames > 
runtime->buffer_size) { /* The transferred area goes over buffer boundary, copy the data to the temp buffer. */ int len; len = runtime->buffer_size - line6pcm->out.pos; if (len > 0) { memcpy(urb_out->transfer_buffer, runtime->dma_area + line6pcm->out.pos * bytes_per_frame, len * bytes_per_frame); memcpy(urb_out->transfer_buffer + len * bytes_per_frame, runtime->dma_area, (urb_frames - len) * bytes_per_frame); } else dev_err(line6pcm->line6->ifcdev, "driver bug: len = %d\n", len); } else { memcpy(urb_out->transfer_buffer, runtime->dma_area + line6pcm->out.pos * bytes_per_frame, urb_out->transfer_buffer_length); } line6pcm->out.pos += urb_frames; if (line6pcm->out.pos >= runtime->buffer_size) line6pcm->out.pos -= runtime->buffer_size; change_volume(urb_out, line6pcm->volume_playback, bytes_per_frame); } else { memset(urb_out->transfer_buffer, 0, urb_out->transfer_buffer_length); } spin_lock_nested(&line6pcm->in.lock, SINGLE_DEPTH_NESTING); if (line6pcm->prev_fbuf) { if (test_bit(LINE6_STREAM_IMPULSE, &line6pcm->out.running)) { create_impulse_test_signal(line6pcm, urb_out, bytes_per_frame); if (test_bit(LINE6_STREAM_PCM, &line6pcm->in.running)) { line6_capture_copy(line6pcm, urb_out->transfer_buffer, urb_out-> transfer_buffer_length); line6_capture_check_period(line6pcm, urb_out->transfer_buffer_length); } } else { if (!(line6pcm->line6->properties->capabilities & LINE6_CAP_HWMON) && line6pcm->out.running && line6pcm->in.running) add_monitor_signal(urb_out, line6pcm->prev_fbuf, line6pcm->volume_monitor, bytes_per_frame); } line6pcm->prev_fbuf = NULL; line6pcm->prev_fsize = 0; } spin_unlock(&line6pcm->in.lock); ret = usb_submit_urb(urb_out, GFP_ATOMIC); if (ret == 0) set_bit(index, &line6pcm->out.active_urbs); else dev_err(line6pcm->line6->ifcdev, "URB out #%d submission failed (%d)\n", index, ret); return 0; } /* Submit all currently available playback URBs. must be called in line6pcm->out.lock context */ int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm) { int ret = 0, i; for (i = 0; i < line6pcm->line6->iso_buffers; ++i) { ret = submit_audio_out_urb(line6pcm); if (ret < 0) break; } return ret; } /* Callback for completed playback URB. 
*/ static void audio_out_callback(struct urb *urb) { int i, index, length = 0, shutdown = 0; unsigned long flags; struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context; struct snd_pcm_substream *substream = get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK); const int bytes_per_frame = line6pcm->properties->bytes_per_channel * line6pcm->properties->playback_hw.channels_max; #if USE_CLEAR_BUFFER_WORKAROUND memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); #endif line6pcm->out.last_frame = urb->start_frame; /* find index of URB */ for (index = 0; index < line6pcm->line6->iso_buffers; index++) if (urb == line6pcm->out.urbs[index]) break; if (index >= line6pcm->line6->iso_buffers) return; /* URB has been unlinked asynchronously */ for (i = 0; i < LINE6_ISO_PACKETS; i++) length += urb->iso_frame_desc[i].length; spin_lock_irqsave(&line6pcm->out.lock, flags); if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running)) { struct snd_pcm_runtime *runtime = substream->runtime; line6pcm->out.pos_done += length / bytes_per_frame; if (line6pcm->out.pos_done >= runtime->buffer_size) line6pcm->out.pos_done -= runtime->buffer_size; } clear_bit(index, &line6pcm->out.active_urbs); for (i = 0; i < LINE6_ISO_PACKETS; i++) if (urb->iso_frame_desc[i].status == -EXDEV) { shutdown = 1; break; } if (test_and_clear_bit(index, &line6pcm->out.unlink_urbs)) shutdown = 1; if (!shutdown) { submit_audio_out_urb(line6pcm); if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running)) { line6pcm->out.bytes += length; if (line6pcm->out.bytes >= line6pcm->out.period) { line6pcm->out.bytes %= line6pcm->out.period; spin_unlock(&line6pcm->out.lock); snd_pcm_period_elapsed(substream); spin_lock(&line6pcm->out.lock); } } } spin_unlock_irqrestore(&line6pcm->out.lock, flags); } /* open playback callback */ static int snd_line6_playback_open(struct snd_pcm_substream *substream) { int err; struct snd_pcm_runtime *runtime = substream->runtime; struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); err = snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &line6pcm->properties->rates); if (err < 0) return err; runtime->hw = line6pcm->properties->playback_hw; return 0; } /* close playback callback */ static int snd_line6_playback_close(struct snd_pcm_substream *substream) { return 0; } /* playback operators */ const struct snd_pcm_ops snd_line6_playback_ops = { .open = snd_line6_playback_open, .close = snd_line6_playback_close, .hw_params = snd_line6_hw_params, .hw_free = snd_line6_hw_free, .prepare = snd_line6_prepare, .trigger = snd_line6_trigger, .pointer = snd_line6_pointer, }; int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) { struct usb_line6 *line6 = line6pcm->line6; int i; line6pcm->out.urbs = kcalloc(line6->iso_buffers, sizeof(struct urb *), GFP_KERNEL); if (line6pcm->out.urbs == NULL) return -ENOMEM; /* create audio URBs and fill in constant values: */ for (i = 0; i < line6->iso_buffers; ++i) { struct urb *urb; /* URB for audio out: */ urb = line6pcm->out.urbs[i] = usb_alloc_urb(LINE6_ISO_PACKETS, GFP_KERNEL); if (urb == NULL) return -ENOMEM; urb->dev = line6->usbdev; urb->pipe = usb_sndisocpipe(line6->usbdev, line6->properties->ep_audio_w & USB_ENDPOINT_NUMBER_MASK); urb->transfer_flags = URB_ISO_ASAP; urb->start_frame = -1; urb->number_of_packets = LINE6_ISO_PACKETS; urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_out_callback; if (usb_urb_ep_type_check(urb)) return -EINVAL; } return 0; } |
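A minimal user-space sketch (not part of the driver) of the fractional frame accumulator used by submit_audio_out_urb() above. The rate values are illustrative only; the real numbers come from properties->rates.rats[0] and intervals_per_second. It shows how the remainder carried in out.count makes some packets one frame longer so the long-term rate stays exact.

#include <stdio.h>

int main(void)
{
        const int frame_increment = 44100;   /* assumed num_min: 44.1 kHz        */
        const int frame_factor = 1 * 1000;   /* assumed den * packets per second */
        int count = 0, i;

        for (i = 0; i < 10; i++) {
                int n;

                count += frame_increment;
                n = count / frame_factor;    /* frames for this packet           */
                count -= n * frame_factor;   /* keep the fractional remainder    */
                printf("packet %d: %d frames\n", i, n);
        }
        return 0;                            /* prints 44 frames, 45 every 10th  */
}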
// SPDX-License-Identifier: GPL-2.0-only /* * keyspan_remote: USB driver for the Keyspan DMR * * Copyright (C) 2005 Zymeta Corporation - Michael Downey (downey@zymeta.com) * * This driver has been put together with the support of Innosys, Inc. * and Keyspan, Inc the manufacturers of the Keyspan USB DMR product. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> /* Parameters that can be passed to the driver. */ static int debug; module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); /* Vendor and product ids */ #define USB_KEYSPAN_VENDOR_ID 0x06CD #define USB_KEYSPAN_PRODUCT_UIA11 0x0202 /* Defines for converting the data from the remote. */ #define ZERO 0x18 #define ZERO_MASK 0x1F /* 5 bits for a 0 */ #define ONE 0x3C #define ONE_MASK 0x3F /* 6 bits for a 1 */ #define SYNC 0x3F80 #define SYNC_MASK 0x3FFF /* 14 bits for a SYNC sequence */ #define STOP 0x00 #define STOP_MASK 0x1F /* 5 bits for the STOP sequence */ #define GAP 0xFF #define RECV_SIZE 8 /* The UIA-11 type have a 8 byte limit. */ /* * Table that maps the 31 possible keycodes to input keys.
* Currently there are 15 and 17 button models so RESERVED codes * are blank areas in the mapping. */ static const unsigned short keyspan_key_table[] = { KEY_RESERVED, /* 0 is just a place holder. */ KEY_RESERVED, KEY_STOP, KEY_PLAYCD, KEY_RESERVED, KEY_PREVIOUSSONG, KEY_REWIND, KEY_FORWARD, KEY_NEXTSONG, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_PAUSE, KEY_VOLUMEUP, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_VOLUMEDOWN, KEY_RESERVED, KEY_UP, KEY_RESERVED, KEY_MUTE, KEY_LEFT, KEY_ENTER, KEY_RIGHT, KEY_RESERVED, KEY_RESERVED, KEY_DOWN, KEY_RESERVED, KEY_KPASTERISK, KEY_RESERVED, KEY_MENU }; /* table of devices that work with this driver */ static const struct usb_device_id keyspan_table[] = { { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) }, { } /* Terminating entry */ }; /* Structure to store all the real stuff that a remote sends to us. */ struct keyspan_message { u16 system; u8 button; u8 toggle; }; /* Structure used for all the bit testing magic needed to be done. */ struct bit_tester { u32 tester; int len; int pos; int bits_left; u8 buffer[32]; }; /* Structure to hold all of our driver specific stuff */ struct usb_keyspan { char name[128]; char phys[64]; unsigned short keymap[ARRAY_SIZE(keyspan_key_table)]; struct usb_device *udev; struct input_dev *input; struct usb_interface *interface; struct usb_endpoint_descriptor *in_endpoint; struct urb* irq_urb; int open; dma_addr_t in_dma; unsigned char *in_buffer; /* variables used to parse messages from remote. */ struct bit_tester data; int stage; int toggle; }; static struct usb_driver keyspan_driver; /* * Debug routine that prints out what we've received from the remote. */ static void keyspan_print(struct usb_keyspan* dev) /*unsigned char* data)*/ { char codes[4 * RECV_SIZE]; int i; for (i = 0; i < RECV_SIZE; i++) snprintf(codes + i * 3, 4, "%02x ", dev->in_buffer[i]); dev_info(&dev->udev->dev, "%s\n", codes); } /* * Routine that manages the bit_tester structure. It makes sure that there are * at least bits_needed bits loaded into the tester. */ static int keyspan_load_tester(struct usb_keyspan* dev, int bits_needed) { if (dev->data.bits_left >= bits_needed) return 0; /* * Somehow we've missed the last message. The message will be repeated * though so it's not too big a deal */ if (dev->data.pos >= dev->data.len) { dev_dbg(&dev->interface->dev, "%s - Error ran out of data. pos: %d, len: %d\n", __func__, dev->data.pos, dev->data.len); return -1; } /* Load as much as we can into the tester. */ while ((dev->data.bits_left + 7 < (sizeof(dev->data.tester) * 8)) && (dev->data.pos < dev->data.len)) { dev->data.tester += (dev->data.buffer[dev->data.pos++] << dev->data.bits_left); dev->data.bits_left += 8; } return 0; } static void keyspan_report_button(struct usb_keyspan *remote, int button, int press) { struct input_dev *input = remote->input; input_event(input, EV_MSC, MSC_SCAN, button); input_report_key(input, remote->keymap[button], press); input_sync(input); } /* * Routine that handles all the logic needed to parse out the message from the remote. */ static void keyspan_check_data(struct usb_keyspan *remote) { int i; int found = 0; struct keyspan_message message; switch(remote->stage) { case 0: /* * In stage 0 we want to find the start of a message. The remote sends a 0xFF as filler. * So the first byte that isn't a FF should be the start of a new message. 
*/ for (i = 0; i < RECV_SIZE && remote->in_buffer[i] == GAP; ++i); if (i < RECV_SIZE) { memcpy(remote->data.buffer, remote->in_buffer, RECV_SIZE); remote->data.len = RECV_SIZE; remote->data.pos = 0; remote->data.tester = 0; remote->data.bits_left = 0; remote->stage = 1; } break; case 1: /* * Stage 1 we should have 16 bytes and should be able to detect a * SYNC. The SYNC is 14 bits, 7 0's and then 7 1's. */ memcpy(remote->data.buffer + remote->data.len, remote->in_buffer, RECV_SIZE); remote->data.len += RECV_SIZE; found = 0; while ((remote->data.bits_left >= 14 || remote->data.pos < remote->data.len) && !found) { for (i = 0; i < 8; ++i) { if (keyspan_load_tester(remote, 14) != 0) { remote->stage = 0; return; } if ((remote->data.tester & SYNC_MASK) == SYNC) { remote->data.tester = remote->data.tester >> 14; remote->data.bits_left -= 14; found = 1; break; } else { remote->data.tester = remote->data.tester >> 1; --remote->data.bits_left; } } } if (!found) { remote->stage = 0; remote->data.len = 0; } else { remote->stage = 2; } break; case 2: /* * Stage 2 we should have 24 bytes which will be enough for a full * message. We need to parse out the system code, button code, * toggle code, and stop. */ memcpy(remote->data.buffer + remote->data.len, remote->in_buffer, RECV_SIZE); remote->data.len += RECV_SIZE; message.system = 0; for (i = 0; i < 9; i++) { keyspan_load_tester(remote, 6); if ((remote->data.tester & ZERO_MASK) == ZERO) { message.system = message.system << 1; remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else if ((remote->data.tester & ONE_MASK) == ONE) { message.system = (message.system << 1) + 1; remote->data.tester = remote->data.tester >> 6; remote->data.bits_left -= 6; } else { dev_err(&remote->interface->dev, "%s - Unknown sequence found in system data.\n", __func__); remote->stage = 0; return; } } message.button = 0; for (i = 0; i < 5; i++) { keyspan_load_tester(remote, 6); if ((remote->data.tester & ZERO_MASK) == ZERO) { message.button = message.button << 1; remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else if ((remote->data.tester & ONE_MASK) == ONE) { message.button = (message.button << 1) + 1; remote->data.tester = remote->data.tester >> 6; remote->data.bits_left -= 6; } else { dev_err(&remote->interface->dev, "%s - Unknown sequence found in button data.\n", __func__); remote->stage = 0; return; } } keyspan_load_tester(remote, 6); if ((remote->data.tester & ZERO_MASK) == ZERO) { message.toggle = 0; remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else if ((remote->data.tester & ONE_MASK) == ONE) { message.toggle = 1; remote->data.tester = remote->data.tester >> 6; remote->data.bits_left -= 6; } else { dev_err(&remote->interface->dev, "%s - Error in message, invalid toggle.\n", __func__); remote->stage = 0; return; } keyspan_load_tester(remote, 5); if ((remote->data.tester & STOP_MASK) == STOP) { remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else { dev_err(&remote->interface->dev, "Bad message received, no stop bit found.\n"); } dev_dbg(&remote->interface->dev, "%s found valid message: system: %d, button: %d, toggle: %d\n", __func__, message.system, message.button, message.toggle); if (message.toggle != remote->toggle) { keyspan_report_button(remote, message.button, 1); keyspan_report_button(remote, message.button, 0); remote->toggle = message.toggle; } remote->stage = 0; break; } } /* * Routine for sending all the initialization messages to the 
remote. */ static int keyspan_setup(struct usb_device* dev) { int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x11, 0x40, 0x5601, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); return(retval); } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x44, 0x40, 0x0, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); return(retval); } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x22, 0x40, 0x0, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); return(retval); } dev_dbg(&dev->dev, "%s - Setup complete.\n", __func__); return(retval); } /* * Routine used to handle a new message that has come in. */ static void keyspan_irq_recv(struct urb *urb) { struct usb_keyspan *dev = urb->context; int retval; /* Check our status in case we need to bail out early. */ switch (urb->status) { case 0: break; /* Device went away so don't keep trying to read from it. */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto resubmit; } if (debug) keyspan_print(dev); keyspan_check_data(dev); resubmit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result: %d\n", __func__, retval); } static int keyspan_open(struct input_dev *dev) { struct usb_keyspan *remote = input_get_drvdata(dev); remote->irq_urb->dev = remote->udev; if (usb_submit_urb(remote->irq_urb, GFP_KERNEL)) return -EIO; return 0; } static void keyspan_close(struct input_dev *dev) { struct usb_keyspan *remote = input_get_drvdata(dev); usb_kill_urb(remote->irq_urb); } static struct usb_endpoint_descriptor *keyspan_get_in_endpoint(struct usb_host_interface *iface) { struct usb_endpoint_descriptor *endpoint; int i; for (i = 0; i < iface->desc.bNumEndpoints; ++i) { endpoint = &iface->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found our interrupt in endpoint */ return endpoint; } } return NULL; } /* * Routine that sets up the driver to handle a specific USB device detected on the bus. */ static int keyspan_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_endpoint_descriptor *endpoint; struct usb_keyspan *remote; struct input_dev *input_dev; int i, error; endpoint = keyspan_get_in_endpoint(interface->cur_altsetting); if (!endpoint) return -ENODEV; remote = kzalloc(sizeof(*remote), GFP_KERNEL); input_dev = input_allocate_device(); if (!remote || !input_dev) { error = -ENOMEM; goto fail1; } remote->udev = udev; remote->input = input_dev; remote->interface = interface; remote->in_endpoint = endpoint; remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. 
*/ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_KERNEL, &remote->in_dma); if (!remote->in_buffer) { error = -ENOMEM; goto fail1; } remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!remote->irq_urb) { error = -ENOMEM; goto fail2; } error = keyspan_setup(udev); if (error) { error = -ENODEV; goto fail3; } if (udev->manufacturer) strscpy(remote->name, udev->manufacturer, sizeof(remote->name)); if (udev->product) { if (udev->manufacturer) strlcat(remote->name, " ", sizeof(remote->name)); strlcat(remote->name, udev->product, sizeof(remote->name)); } if (!strlen(remote->name)) snprintf(remote->name, sizeof(remote->name), "USB Keyspan Remote %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); usb_make_path(udev, remote->phys, sizeof(remote->phys)); strlcat(remote->phys, "/input0", sizeof(remote->phys)); memcpy(remote->keymap, keyspan_key_table, sizeof(remote->keymap)); input_dev->name = remote->name; input_dev->phys = remote->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &interface->dev; input_dev->keycode = remote->keymap; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(remote->keymap); input_set_capability(input_dev, EV_MSC, MSC_SCAN); __set_bit(EV_KEY, input_dev->evbit); for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++) __set_bit(keyspan_key_table[i], input_dev->keybit); __clear_bit(KEY_RESERVED, input_dev->keybit); input_set_drvdata(input_dev, remote); input_dev->open = keyspan_open; input_dev->close = keyspan_close; /* * Initialize the URB to access the device. * The urb gets sent to the device in keyspan_open() */ usb_fill_int_urb(remote->irq_urb, remote->udev, usb_rcvintpipe(remote->udev, endpoint->bEndpointAddress), remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote, endpoint->bInterval); remote->irq_urb->transfer_dma = remote->in_dma; remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* we can register the device now, as it is ready */ error = input_register_device(remote->input); if (error) goto fail3; /* save our data pointer in this interface device */ usb_set_intfdata(interface, remote); return 0; fail3: usb_free_urb(remote->irq_urb); fail2: usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma); fail1: kfree(remote); input_free_device(input_dev); return error; } /* * Routine called when a device is disconnected from the USB. */ static void keyspan_disconnect(struct usb_interface *interface) { struct usb_keyspan *remote; remote = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (remote) { /* We have a valid driver structure so clean up everything we allocated. */ input_unregister_device(remote->input); usb_kill_urb(remote->irq_urb); usb_free_urb(remote->irq_urb); usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma); kfree(remote); } } /* * Standard driver set up sections */ static struct usb_driver keyspan_driver = { .name = "keyspan_remote", .probe = keyspan_probe, .disconnect = keyspan_disconnect, .id_table = keyspan_table }; module_usb_driver(keyspan_driver); MODULE_DEVICE_TABLE(usb, keyspan_table); MODULE_AUTHOR("Michael Downey <downey@zymeta.com>"); MODULE_DESCRIPTION("Driver for the USB Keyspan remote control."); MODULE_LICENSE("GPL"); |
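The stage-2 parsing in keyspan_check_data() above classifies pulse-width symbols by matching the 5-bit ZERO pattern and the 6-bit ONE pattern at the low end of the running bit tester, then shifting the matched bits out. A small stand-alone sketch of that classification, with a made-up two-symbol test vector (the bit stream is loaded LSB first, as in keyspan_load_tester()):

#include <stdio.h>
#include <stdint.h>

#define ZERO      0x18
#define ZERO_MASK 0x1F
#define ONE       0x3C
#define ONE_MASK  0x3F

static int next_symbol(uint32_t *tester, int *bits_left)
{
        if ((*tester & ZERO_MASK) == ZERO) {
                *tester >>= 5;
                *bits_left -= 5;
                return 0;
        }
        if ((*tester & ONE_MASK) == ONE) {
                *tester >>= 6;
                *bits_left -= 6;
                return 1;
        }
        return -1;                      /* unknown sequence, caller must resync */
}

int main(void)
{
        /* made-up stream: a '1' symbol followed by a '0' symbol, LSB first */
        uint32_t tester = ONE | (ZERO << 6);
        int bits_left = 11;

        while (bits_left >= 5) {
                int s = next_symbol(&tester, &bits_left);

                if (s < 0)
                        break;
                printf("symbol: %d\n", s);      /* prints 1, then 0 */
        }
        return 0;
}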
913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 | // SPDX-License-Identifier: BSD-3-Clause-Clear /* Copyright (C) 2023 MediaTek Inc. */ #include <linux/module.h> #include <linux/firmware.h> #include "mt792x.h" #include "dma.h" static const struct ieee80211_iface_limit if_limits[] = { { .max = MT792x_MAX_INTERFACES, .types = BIT(NL80211_IFTYPE_STATION) }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) } }; static const struct ieee80211_iface_combination if_comb[] = { { .limits = if_limits, .n_limits = ARRAY_SIZE(if_limits), .max_interfaces = MT792x_MAX_INTERFACES, .num_different_channels = 1, .beacon_int_infra_match = true, }, }; static const struct ieee80211_iface_limit if_limits_chanctx_mcc[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_GO) }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } }; static const struct ieee80211_iface_limit if_limits_chanctx_scc[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } }; static const struct ieee80211_iface_combination if_comb_chanctx[] = { { .limits = if_limits_chanctx_mcc, .n_limits = ARRAY_SIZE(if_limits_chanctx_mcc), .max_interfaces = 3, .num_different_channels = 2, .beacon_int_infra_match = false, }, { .limits = if_limits_chanctx_scc, .n_limits = ARRAY_SIZE(if_limits_chanctx_scc), .max_interfaces = 3, .num_different_channels = 1, .beacon_int_infra_match = false, } }; void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_phy *mphy = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct mt76_wcid *wcid = &dev->mt76.global_wcid; u8 link_id; int qid; if (control->sta) { struct mt792x_link_sta *mlink; struct mt792x_sta *sta; link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK); sta = (struct mt792x_sta *)control->sta->drv_priv; mlink = mt792x_sta_to_link(sta, link_id); wcid = &mlink->wcid; } if (vif && !control->sta) { struct mt792x_vif *mvif; mvif = (struct mt792x_vif *)vif->drv_priv; wcid = &mvif->sta.deflink.wcid; } if (vif && control->sta && ieee80211_vif_is_mld(vif)) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_link_sta *link_sta; struct ieee80211_bss_conf *conf; link_id = wcid->link_id; rcu_read_lock(); conf = rcu_dereference(vif->link_conf[link_id]); memcpy(hdr->addr2, conf->addr, ETH_ALEN); link_sta = rcu_dereference(control->sta->link[link_id]); memcpy(hdr->addr1, link_sta->addr, ETH_ALEN); if (vif->type == NL80211_IFTYPE_STATION) memcpy(hdr->addr3, conf->bssid, ETH_ALEN); rcu_read_unlock(); } if (mt76_connac_pm_ref(mphy, &dev->pm)) { mt76_tx(mphy, control->sta, wcid, skb); mt76_connac_pm_unref(mphy, &dev->pm); return; } qid = skb_get_queue_mapping(skb); if (qid >= MT_TXQ_PSD) { qid = IEEE80211_AC_BE; skb_set_queue_mapping(skb, qid); } mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb); } EXPORT_SYMBOL_GPL(mt792x_tx); void mt792x_stop(struct ieee80211_hw *hw, bool suspend) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_phy *phy = 
mt792x_hw_phy(hw); cancel_delayed_work_sync(&phy->mt76->mac_work); cancel_delayed_work_sync(&dev->pm.ps_work); cancel_work_sync(&dev->pm.wake_work); cancel_work_sync(&dev->reset_work); mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); if (is_mt7921(&dev->mt76)) { mt792x_mutex_acquire(dev); mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); mt792x_mutex_release(dev); } clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); } EXPORT_SYMBOL_GPL(mt792x_stop); void mt792x_mac_link_bss_remove(struct mt792x_dev *dev, struct mt792x_bss_conf *mconf, struct mt792x_link_sta *mlink) { struct ieee80211_vif *vif = container_of((void *)mconf->vif, struct ieee80211_vif, drv_priv); struct ieee80211_bss_conf *link_conf; int idx = mlink->wcid.idx; link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76, &mlink->wcid, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); dev->mt76.vif_mask &= ~BIT_ULL(mconf->mt76.idx); mconf->vif->phy->omac_mask &= ~BIT_ULL(mconf->mt76.omac_idx); spin_lock_bh(&dev->mt76.sta_poll_lock); if (!list_empty(&mlink->wcid.poll_list)) list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); mt76_wcid_cleanup(&dev->mt76, &mlink->wcid); } EXPORT_SYMBOL_GPL(mt792x_mac_link_bss_remove); void mt792x_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_bss_conf *mconf; mt792x_mutex_acquire(dev); mconf = mt792x_link_conf_to_mconf(&vif->bss_conf); mt792x_mac_link_bss_remove(dev, mconf, &mvif->sta.deflink); mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt792x_remove_interface); int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; /* no need to update right away, we'll get BSS_CHANGED_QOS */ queue = mt76_connac_lmac_mapping(queue); mvif->bss_conf.queue_params[queue] = *params; return 0; } EXPORT_SYMBOL_GPL(mt792x_conf_tx); int mt792x_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt76_mib_stats *mib = &phy->mib; mt792x_mutex_acquire(phy->dev); stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; mt792x_mutex_release(phy->dev); return 0; } EXPORT_SYMBOL_GPL(mt792x_get_stats); u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); u8 omac_idx = mvif->bss_conf.mt76.omac_idx; union { u64 t64; u32 t32[2]; } tsf; u16 n; mt792x_mutex_acquire(dev); n = omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 : omac_idx; /* TSF software read */ mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE); tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0)); tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0)); mt792x_mutex_release(dev); return tsf.t64; } EXPORT_SYMBOL_GPL(mt792x_get_tsf); void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 timestamp) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); u8 omac_idx = mvif->bss_conf.mt76.omac_idx; union { u64 t64; u32 t32[2]; } tsf = { .t64 = timestamp, }; u16 n; mt792x_mutex_acquire(dev); n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]); /* TSF software overwrite */ mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE); mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt792x_set_tsf); void mt792x_tx_worker(struct mt76_worker *w) { struct mt792x_dev *dev = container_of(w, struct mt792x_dev, mt76.tx_worker); if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { queue_work(dev->mt76.wq, &dev->pm.wake_work); return; } mt76_txq_schedule_all(&dev->mphy); mt76_connac_pm_unref(&dev->mphy, &dev->pm); } EXPORT_SYMBOL_GPL(mt792x_tx_worker); void mt792x_roc_timer(struct timer_list *timer) { struct mt792x_phy *phy = timer_container_of(phy, timer, roc_timer); ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); } EXPORT_SYMBOL_GPL(mt792x_roc_timer); void mt792x_csa_timer(struct timer_list *timer) { struct mt792x_vif *mvif = timer_container_of(mvif, timer, csa_timer); ieee80211_queue_work(mvif->phy->mt76->hw, &mvif->csa_work); } EXPORT_SYMBOL_GPL(mt792x_csa_timer); void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct mt792x_dev *dev = mt792x_hw_dev(hw); wait_event_timeout(dev->mt76.tx_wait, !mt76_has_tx_pending(&dev->mphy), HZ / 2); } EXPORT_SYMBOL_GPL(mt792x_flush); int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); mvif->bss_conf.mt76.ctx = ctx; mctx->bss_conf = &mvif->bss_conf; mutex_unlock(&dev->mt76.mutex); return 0; } EXPORT_SYMBOL_GPL(mt792x_assign_vif_chanctx); void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); mctx->bss_conf = NULL; mvif->bss_conf.mt76.ctx = NULL; mutex_unlock(&dev->mt76.mutex); if (vif->bss_conf.csa_active) { timer_delete_sync(&mvif->csa_timer); cancel_work_sync(&mvif->csa_work); } } EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx); void mt792x_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_dev *mdev = &dev->mt76; device_set_wakeup_enable(mdev->dev, enabled); } EXPORT_SYMBOL_GPL(mt792x_set_wakeup); static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = { /* tx counters */ "tx_ampdu_cnt", "tx_mpdu_attempts", "tx_mpdu_success", "tx_pkt_ebf_cnt", "tx_pkt_ibf_cnt", "tx_ampdu_len:0-1", "tx_ampdu_len:2-10", "tx_ampdu_len:11-19", "tx_ampdu_len:20-28", 
"tx_ampdu_len:29-37", "tx_ampdu_len:38-46", "tx_ampdu_len:47-55", "tx_ampdu_len:56-79", "tx_ampdu_len:80-103", "tx_ampdu_len:104-127", "tx_ampdu_len:128-151", "tx_ampdu_len:152-175", "tx_ampdu_len:176-199", "tx_ampdu_len:200-223", "tx_ampdu_len:224-247", "ba_miss_count", "tx_beamformer_ppdu_iBF", "tx_beamformer_ppdu_eBF", "tx_beamformer_rx_feedback_all", "tx_beamformer_rx_feedback_he", "tx_beamformer_rx_feedback_vht", "tx_beamformer_rx_feedback_ht", "tx_msdu_pack_1", "tx_msdu_pack_2", "tx_msdu_pack_3", "tx_msdu_pack_4", "tx_msdu_pack_5", "tx_msdu_pack_6", "tx_msdu_pack_7", "tx_msdu_pack_8", /* rx counters */ "rx_mpdu_cnt", "rx_ampdu_cnt", "rx_ampdu_bytes_cnt", "rx_ba_cnt", /* per vif counters */ "v_tx_mode_cck", "v_tx_mode_ofdm", "v_tx_mode_ht", "v_tx_mode_ht_gf", "v_tx_mode_vht", "v_tx_mode_he_su", "v_tx_mode_he_ext_su", "v_tx_mode_he_tb", "v_tx_mode_he_mu", "v_tx_mode_eht_su", "v_tx_mode_eht_trig", "v_tx_mode_eht_mu", "v_tx_bw_20", "v_tx_bw_40", "v_tx_bw_80", "v_tx_bw_160", "v_tx_bw_320", "v_tx_mcs_0", "v_tx_mcs_1", "v_tx_mcs_2", "v_tx_mcs_3", "v_tx_mcs_4", "v_tx_mcs_5", "v_tx_mcs_6", "v_tx_mcs_7", "v_tx_mcs_8", "v_tx_mcs_9", "v_tx_mcs_10", "v_tx_mcs_11", "v_tx_mcs_12", "v_tx_mcs_13", "v_tx_nss_1", "v_tx_nss_2", "v_tx_nss_3", "v_tx_nss_4", }; void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset != ETH_SS_STATS) return; memcpy(data, mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats)); data += sizeof(mt792x_gstrings_stats); page_pool_ethtool_stats_get_strings(data); } EXPORT_SYMBOL_GPL(mt792x_get_et_strings); int mt792x_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset != ETH_SS_STATS) return 0; return ARRAY_SIZE(mt792x_gstrings_stats) + page_pool_ethtool_stats_get_count(); } EXPORT_SYMBOL_GPL(mt792x_get_et_sset_count); static void mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt76_ethtool_worker_info *wi = wi_data; if (msta->vif->bss_conf.mt76.idx != wi->idx) return; mt76_ethtool_worker(wi, &msta->deflink.wcid.stats, true); } void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; int stats_size = ARRAY_SIZE(mt792x_gstrings_stats); struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = phy->dev; struct mt76_mib_stats *mib = &phy->mib; struct mt76_ethtool_worker_info wi = { .data = data, .idx = mvif->bss_conf.mt76.idx, }; int i, ei = 0; mt792x_mutex_acquire(dev); mt792x_mac_update_mib_stats(phy); data[ei++] = mib->tx_ampdu_cnt; data[ei++] = mib->tx_mpdu_attempts_cnt; data[ei++] = mib->tx_mpdu_success_cnt; data[ei++] = mib->tx_pkt_ebf_cnt; data[ei++] = mib->tx_pkt_ibf_cnt; /* Tx ampdu stat */ for (i = 0; i < 15; i++) data[ei++] = phy->mt76->aggr_stats[i]; data[ei++] = phy->mib.ba_miss_cnt; /* Tx Beamformer monitor */ data[ei++] = mib->tx_bf_ibf_ppdu_cnt; data[ei++] = mib->tx_bf_ebf_ppdu_cnt; /* Tx Beamformer Rx feedback monitor */ data[ei++] = mib->tx_bf_rx_fb_all_cnt; data[ei++] = mib->tx_bf_rx_fb_he_cnt; data[ei++] = mib->tx_bf_rx_fb_vht_cnt; data[ei++] = mib->tx_bf_rx_fb_ht_cnt; /* Tx amsdu info (pack-count histogram) */ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) data[ei++] = mib->tx_amsdu[i]; /* rx counters */ data[ei++] = mib->rx_mpdu_cnt; data[ei++] = mib->rx_ampdu_cnt; data[ei++] = mib->rx_ampdu_bytes_cnt; data[ei++] = mib->rx_ba_cnt; /* Add values for all 
stations owned by this vif */ wi.initial_stat_idx = ei; ieee80211_iterate_stations_atomic(hw, mt792x_ethtool_worker, &wi); mt792x_mutex_release(dev); if (!wi.sta_count) return; ei += wi.worker_stat_count; mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei); stats_size += page_pool_ethtool_stats_get_count(); if (ei != stats_size) dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size); } EXPORT_SYMBOL_GPL(mt792x_get_et_stats); void mt792x_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct rate_info *txrate = &msta->deflink.wcid.rate; if (!txrate->legacy && !txrate->flags) return; if (txrate->legacy) { sinfo->txrate.legacy = txrate->legacy; } else { sinfo->txrate.mcs = txrate->mcs; sinfo->txrate.nss = txrate->nss; sinfo->txrate.bw = txrate->bw; sinfo->txrate.he_gi = txrate->he_gi; sinfo->txrate.he_dcm = txrate->he_dcm; sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; } sinfo->tx_failed = msta->deflink.wcid.stats.tx_failed; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->tx_retries = msta->deflink.wcid.stats.tx_retries; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->ack_signal = (s8)msta->deflink.ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->deflink.avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } EXPORT_SYMBOL_GPL(mt792x_sta_statistics); void mt792x_set_coverage_class(struct ieee80211_hw *hw, int radio_idx, s16 coverage_class) { struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = phy->dev; mt792x_mutex_acquire(dev); phy->coverage_class = max_t(s16, coverage_class, 0); mt792x_mac_set_timeing(phy); mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt792x_set_coverage_class); int mt792x_init_wiphy(struct ieee80211_hw *hw) { struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = phy->dev; struct wiphy *wiphy = hw->wiphy; hw->queues = 4; if (dev->has_eht) { hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; } else { hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; } hw->netdev_features = NETIF_F_RXCSUM; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; phy->slottime = 9; hw->sta_data_size = sizeof(struct mt792x_sta); hw->vif_data_size = sizeof(struct mt792x_vif); hw->chanctx_data_size = sizeof(struct mt792x_chanctx); if (dev->fw_features & MT792x_FW_CAP_CNM) { wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->iface_combinations = if_comb_chanctx; wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_chanctx); } else { wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->iface_combinations = if_comb; wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION); wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE); wiphy->max_remain_on_channel_duration = 5000; wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; wiphy->max_scan_ssids = 4; wiphy->max_sched_scan_plan_interval = MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL; 
wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN; wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID; wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH; wiphy->max_sched_scan_reqs = 1; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_SPLIT_SCAN_6GHZ; wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); if (is_mt7921(&dev->mt76)) { ieee80211_hw_set(hw, CHANCTX_STA_CSA); } if (dev->pm.enable) ieee80211_hw_set(hw, CONNECTION_MONITOR); hw->max_tx_fragments = 4; return 0; } EXPORT_SYMBOL_GPL(mt792x_init_wiphy); static u8 mt792x_get_offload_capability(struct device *dev, const char *fw_wm) { const struct mt76_connac2_fw_trailer *hdr; struct mt792x_realease_info *rel_info; const struct firmware *fw; int ret, i, offset = 0; const u8 *data, *end; u8 offload_caps = 0; ret = request_firmware(&fw, fw_wm, dev); if (ret) return ret; if (!fw || !fw->data || fw->size < sizeof(*hdr)) { dev_err(dev, "Invalid firmware\n"); goto out; } data = fw->data; hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); for (i = 0; i < hdr->n_region; i++) { const struct mt76_connac2_fw_region *region; region = (const void *)((const u8 *)hdr - (hdr->n_region - i) * sizeof(*region)); offset += le32_to_cpu(region->len); } data += offset + 16; rel_info = (struct mt792x_realease_info *)data; data += sizeof(*rel_info); end = data + le16_to_cpu(rel_info->len); while (data < end) { rel_info = (struct mt792x_realease_info *)data; data += sizeof(*rel_info); if (rel_info->tag == MT792x_FW_TAG_FEATURE) { struct mt792x_fw_features *features; features = (struct mt792x_fw_features *)data; offload_caps = features->data; break; } data += le16_to_cpu(rel_info->len) + rel_info->pad_len; } out: release_firmware(fw); return offload_caps; } struct ieee80211_ops * mt792x_get_mac80211_ops(struct device *dev, const struct ieee80211_ops *mac80211_ops, void *drv_data, u8 *fw_features) { struct ieee80211_ops *ops; ops = devm_kmemdup(dev, mac80211_ops, sizeof(struct ieee80211_ops), GFP_KERNEL); if (!ops) return NULL; *fw_features = mt792x_get_offload_capability(dev, drv_data); if (!(*fw_features & MT792x_FW_CAP_CNM)) { ops->remain_on_channel = NULL; ops->cancel_remain_on_channel = NULL; ops->add_chanctx = ieee80211_emulate_add_chanctx; ops->remove_chanctx = ieee80211_emulate_remove_chanctx; ops->change_chanctx = ieee80211_emulate_change_chanctx; ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx; ops->assign_vif_chanctx = NULL; ops->unassign_vif_chanctx = NULL; ops->mgd_prepare_tx = NULL; ops->mgd_complete_tx = NULL; } 
return ops; } EXPORT_SYMBOL_GPL(mt792x_get_mac80211_ops); int mt792x_init_wcid(struct mt792x_dev *dev) { int idx; /* Beacon and mgmt frames should occupy wcid 0 */ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); if (idx) return -ENOSPC; dev->mt76.global_wcid.idx = idx; dev->mt76.global_wcid.hw_key_idx = -1; dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); return 0; } EXPORT_SYMBOL_GPL(mt792x_init_wcid); int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int err = 0; mutex_lock(&pm->mutex); if (!test_bit(MT76_STATE_PM, &mphy->state)) goto out; err = __mt792x_mcu_drv_pmctrl(dev); out: mutex_unlock(&pm->mutex); if (err) mt792x_reset(&dev->mt76); return err; } EXPORT_SYMBOL_GPL(mt792x_mcu_drv_pmctrl); int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int err = 0; mutex_lock(&pm->mutex); if (mt76_connac_skip_fw_pmctrl(mphy, pm)) goto out; err = __mt792x_mcu_fw_pmctrl(dev); out: mutex_unlock(&pm->mutex); if (err) mt792x_reset(&dev->mt76); return err; } EXPORT_SYMBOL_GPL(mt792x_mcu_fw_pmctrl); int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) { int i, err = 0; for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); if (dev->aspm_supported) usleep_range(2000, 3000); if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1)) break; } if (i == MT792x_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "driver own failed\n"); err = -EIO; } return err; } EXPORT_SYMBOL_GPL(__mt792xe_mcu_drv_pmctrl); int mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int err; err = __mt792xe_mcu_drv_pmctrl(dev); if (err < 0) goto out; mt792x_wpdma_reinit_cond(dev); clear_bit(MT76_STATE_PM, &mphy->state); pm->stats.last_wake_event = jiffies; pm->stats.doze_time += pm->stats.last_wake_event - pm->stats.last_doze_event; out: return err; } EXPORT_SYMBOL_GPL(mt792xe_mcu_drv_pmctrl); int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int i; for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_OWN_SYNC, 4, 50, 1)) break; } if (i == MT792x_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "firmware own failed\n"); clear_bit(MT76_STATE_PM, &mphy->state); return -EIO; } pm->stats.last_doze_event = jiffies; pm->stats.awake_time += pm->stats.last_doze_event - pm->stats.last_wake_event; return 0; } EXPORT_SYMBOL_GPL(mt792xe_mcu_fw_pmctrl); int mt792x_load_firmware(struct mt792x_dev *dev) { int ret; ret = mt76_connac2_load_patch(&dev->mt76, mt792x_patch_name(dev)); if (ret) return ret; if (mt76_is_sdio(&dev->mt76)) { /* activate again */ ret = __mt792x_mcu_fw_pmctrl(dev); if (!ret) ret = __mt792x_mcu_drv_pmctrl(dev); } ret = mt76_connac2_load_ram(&dev->mt76, mt792x_ram_name(dev), NULL); if (ret) return ret; if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY, MT_TOP_MISC2_FW_N9_RDY, 1500)) { dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); return -EIO; } #ifdef CONFIG_PM dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; #endif /* CONFIG_PM */ dev_dbg(dev->mt76.dev, "Firmware init done\n"); return 0; } 
EXPORT_SYMBOL_GPL(mt792x_load_firmware); void mt792x_config_mac_addr_list(struct mt792x_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; int i; for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { u8 *addr = dev->macaddr_list[i].addr; memcpy(addr, dev->mphy.macaddr, ETH_ALEN); if (!i) continue; addr[0] |= BIT(1); addr[0] ^= ((i - 1) << 2); } wiphy->addresses = dev->macaddr_list; wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list); } EXPORT_SYMBOL_GPL(mt792x_config_mac_addr_list); MODULE_DESCRIPTION("MediaTek MT792x core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); |
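mt792x_config_mac_addr_list() above derives the extra wiphy addresses from the base MAC by setting the locally-administered bit and XOR-ing a small index into the first octet. A user-space sketch of that derivation, with a made-up base address and an illustrative count of four entries (the driver fills its whole macaddr_list the same way):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        const uint8_t base[6] = { 0x00, 0x0c, 0x43, 0x26, 0x60, 0x00 }; /* made up */
        uint8_t addr[6];
        int i;

        for (i = 0; i < 4; i++) {
                memcpy(addr, base, sizeof(addr));
                if (i) {
                        addr[0] |= 1 << 1;          /* locally administered bit */
                        addr[0] ^= (i - 1) << 2;    /* make each entry unique   */
                }
                printf("addr %d: %02x:%02x:%02x:%02x:%02x:%02x\n",
                       i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        }
        return 0;
}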
| 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project * <http://rt2x00.serialmonkey.com> * * Module: eeprom_93cx6 * Abstract: EEPROM reader routines for 93cx6 chipsets. * Supported chipsets: 93c46 & 93c66. */ #include <linux/bits.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/eeprom_93cx6.h> MODULE_AUTHOR("http://rt2x00.serialmonkey.com"); MODULE_VERSION("1.0"); MODULE_DESCRIPTION("EEPROM 93cx6 chip driver"); MODULE_LICENSE("GPL"); static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom) { eeprom->reg_data_clock = 1; eeprom->register_write(eeprom); /* * Add a short delay for the pulse to work. * According to the specifications the "maximum minimum" * time should be 450ns. */ ndelay(450); } static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom) { eeprom->reg_data_clock = 0; eeprom->register_write(eeprom); /* * Add a short delay for the pulse to work. * According to the specifications the "maximum minimum" * time should be 450ns. */ ndelay(450); } static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) { /* * Clear all flags, and enable chip select. */ eeprom->register_read(eeprom); eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; eeprom->reg_data_clock = 0; eeprom->reg_chip_select = 1; eeprom->drive_data = 1; eeprom->register_write(eeprom); /* * kick a pulse. */ eeprom_93cx6_pulse_high(eeprom); eeprom_93cx6_pulse_low(eeprom); } static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom) { /* * Clear chip_select and data_in flags. */ eeprom->register_read(eeprom); eeprom->reg_data_in = 0; eeprom->reg_chip_select = 0; eeprom->register_write(eeprom); /* * kick a pulse. */ eeprom_93cx6_pulse_high(eeprom); eeprom_93cx6_pulse_low(eeprom); } static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, const u16 data, const u16 count) { unsigned int i; eeprom->register_read(eeprom); /* * Clear data flags. */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; eeprom->drive_data = 1; /* * Start writing all bits. 
*/ for (i = count; i > 0; i--) { /* * Check if this bit needs to be set. */ eeprom->reg_data_in = !!(data & BIT(i - 1)); /* * Write the bit to the eeprom register. */ eeprom->register_write(eeprom); /* * Kick a pulse. */ eeprom_93cx6_pulse_high(eeprom); eeprom_93cx6_pulse_low(eeprom); } eeprom->reg_data_in = 0; eeprom->register_write(eeprom); } static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, u16 *data, const u16 count) { unsigned int i; u16 buf = 0; eeprom->register_read(eeprom); /* * Clear data flags. */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; eeprom->drive_data = 0; /* * Start reading all bits. */ for (i = count; i > 0; i--) { eeprom_93cx6_pulse_high(eeprom); eeprom->register_read(eeprom); /* * Clear data_in flag. */ eeprom->reg_data_in = 0; /* * Read if the bit has been set. */ if (eeprom->reg_data_out) buf |= BIT(i - 1); eeprom_93cx6_pulse_low(eeprom); } *data = buf; } /** * eeprom_93cx6_read - Read a word from eeprom * @eeprom: Pointer to eeprom structure * @word: Word index from where we should start reading * @data: target pointer where the information will have to be stored * * This function will read the eeprom data as host-endian word * into the given data pointer. */ void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, u16 *data) { u16 command; /* * Initialize the eeprom register */ eeprom_93cx6_startup(eeprom); /* * Select the read opcode and the word to be read. */ command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word; eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width); if (has_quirk_extra_read_cycle(eeprom)) { eeprom_93cx6_pulse_high(eeprom); eeprom_93cx6_pulse_low(eeprom); } /* * Read the requested 16 bits. */ eeprom_93cx6_read_bits(eeprom, data, 16); /* * Cleanup eeprom register. */ eeprom_93cx6_cleanup(eeprom); } EXPORT_SYMBOL_GPL(eeprom_93cx6_read); /** * eeprom_93cx6_multiread - Read multiple words from eeprom * @eeprom: Pointer to eeprom structure * @word: Word index from where we should start reading * @data: target pointer where the information will have to be stored * @words: Number of words that should be read. * * This function will read all requested words from the eeprom, * this is done by calling eeprom_93cx6_read() multiple times. * But with the additional change that while the eeprom_93cx6_read * will return host ordered bytes, this method will return little * endian words. */ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, __le16 *data, const u16 words) { unsigned int i; u16 tmp; for (i = 0; i < words; i++) { tmp = 0; eeprom_93cx6_read(eeprom, word + i, &tmp); data[i] = cpu_to_le16(tmp); } } EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); /** * eeprom_93cx6_readb - Read a byte from eeprom * @eeprom: Pointer to eeprom structure * @byte: Byte index from where we should start reading * @data: target pointer where the information will have to be stored * * This function will read a byte of the eeprom data * into the given data pointer. */ void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte, u8 *data) { u16 command; u16 tmp; /* * Initialize the eeprom register */ eeprom_93cx6_startup(eeprom); /* * Select the read opcode and the byte to be read. */ command = (PCI_EEPROM_READ_OPCODE << (eeprom->width + 1)) | byte; eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1); if (has_quirk_extra_read_cycle(eeprom)) { eeprom_93cx6_pulse_high(eeprom); eeprom_93cx6_pulse_low(eeprom); } /* * Read the requested 8 bits. 
*/ eeprom_93cx6_read_bits(eeprom, &tmp, 8); *data = tmp & 0xff; /* * Cleanup eeprom register. */ eeprom_93cx6_cleanup(eeprom); } EXPORT_SYMBOL_GPL(eeprom_93cx6_readb); /** * eeprom_93cx6_multireadb - Read multiple bytes from eeprom * @eeprom: Pointer to eeprom structure * @byte: Index from where we should start reading * @data: target pointer where the information will have to be stored * @bytes: Number of bytes that should be read. * * This function will read all requested bytes from the eeprom, * this is done by calling eeprom_93cx6_readb() multiple times. */ void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, const u8 byte, u8 *data, const u16 bytes) { unsigned int i; for (i = 0; i < bytes; i++) eeprom_93cx6_readb(eeprom, byte + i, &data[i]); } EXPORT_SYMBOL_GPL(eeprom_93cx6_multireadb); /** * eeprom_93cx6_wren - set the write enable state * @eeprom: Pointer to eeprom structure * @enable: true to enable writes, otherwise disable writes * * Set the EEPROM write enable state to either allow or deny * writes depending on the @enable value. */ void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable) { u16 command; /* start the command */ eeprom_93cx6_startup(eeprom); /* create command to enable/disable */ command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE; command <<= (eeprom->width - 2); eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width); eeprom_93cx6_cleanup(eeprom); } EXPORT_SYMBOL_GPL(eeprom_93cx6_wren); /** * eeprom_93cx6_write - write data to the EEPROM * @eeprom: Pointer to eeprom structure * @addr: Address to write data to. * @data: The data to write to address @addr. * * Write the @data to the specified @addr in the EEPROM and * waiting for the device to finish writing. * * Note, since we do not expect large number of write operations * we delay in between parts of the operation to avoid using excessive * amounts of CPU time busy waiting. */ void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data) { int timeout = 100; u16 command; /* start the command */ eeprom_93cx6_startup(eeprom); command = PCI_EEPROM_WRITE_OPCODE << eeprom->width; command |= addr; /* send write command */ eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width); /* send data */ eeprom_93cx6_write_bits(eeprom, data, 16); /* get ready to check for busy */ eeprom->drive_data = 0; eeprom->reg_chip_select = 1; eeprom->register_write(eeprom); /* wait at-least 250ns to get DO to be the busy signal */ usleep_range(1000, 2000); /* wait for DO to go high to signify finish */ while (true) { eeprom->register_read(eeprom); if (eeprom->reg_data_out) break; usleep_range(1000, 2000); if (--timeout <= 0) { printk(KERN_ERR "%s: timeout\n", __func__); break; } } eeprom_93cx6_cleanup(eeprom); } EXPORT_SYMBOL_GPL(eeprom_93cx6_write); |
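For context, a hedged sketch of how a driver is expected to hook into the helpers above: the two callbacks mirror the soft flags to a device EEPROM control register. Everything prefixed my_ (the device struct, register offset and bit layout) is hypothetical glue invented for illustration; only the eeprom_93cx6 fields, PCI_EEPROM_WIDTH_93C46 and eeprom_93cx6_multiread() come from the code above and <linux/eeprom_93cx6.h>.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/eeprom_93cx6.h>

/* Hypothetical register layout of an imagined device. */
#define MY_EEPROM_CSR           0x0000
#define MY_EEPROM_DATA_IN       BIT(0)
#define MY_EEPROM_DATA_OUT      BIT(1)
#define MY_EEPROM_CLOCK         BIT(2)
#define MY_EEPROM_CHIP_SELECT   BIT(3)

struct my_dev {
        void __iomem *regs;
};

static void my_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
        struct my_dev *dev = eeprom->data;
        u32 reg = readl(dev->regs + MY_EEPROM_CSR);

        /* reflect the hardware pins into the soft flags */
        eeprom->reg_data_in = !!(reg & MY_EEPROM_DATA_IN);
        eeprom->reg_data_out = !!(reg & MY_EEPROM_DATA_OUT);
        eeprom->reg_data_clock = !!(reg & MY_EEPROM_CLOCK);
        eeprom->reg_chip_select = !!(reg & MY_EEPROM_CHIP_SELECT);
}

static void my_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
        struct my_dev *dev = eeprom->data;
        u32 reg = 0;

        /* drive the hardware pins from the soft flags */
        if (eeprom->reg_data_in)
                reg |= MY_EEPROM_DATA_IN;
        if (eeprom->reg_data_clock)
                reg |= MY_EEPROM_CLOCK;
        if (eeprom->reg_chip_select)
                reg |= MY_EEPROM_CHIP_SELECT;
        writel(reg, dev->regs + MY_EEPROM_CSR);
}

static void my_read_calibration(struct my_dev *dev, __le16 *buf, u16 words)
{
        struct eeprom_93cx6 eeprom = {
                .data           = dev,
                .register_read  = my_eeprom_register_read,
                .register_write = my_eeprom_register_write,
                .width          = PCI_EEPROM_WIDTH_93C46,   /* assuming a 93c46 part */
        };

        eeprom_93cx6_multiread(&eeprom, 0, buf, words);
}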
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 | /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> */ #ifndef __MT76x02_H #define __MT76x02_H #include <linux/kfifo.h> #include "mt76.h" #include "mt76x02_regs.h" #include "mt76x02_mac.h" #include "mt76x02_dfs.h" #include "mt76x02_dma.h" #define MT76x02_TX_RING_SIZE 512 #define MT76x02_PSD_RING_SIZE 128 #define MT76x02_N_WCIDS 128 #define MT_CALIBRATE_INTERVAL HZ #define MT_MAC_WORK_INTERVAL (HZ / 10) #define MT_WATCHDOG_TIME (HZ / 10) #define MT_TX_HANG_TH 10 #define MT_MAX_CHAINS 2 struct mt76x02_rx_freq_cal { s8 high_gain[MT_MAX_CHAINS]; s8 rssi_offset[MT_MAX_CHAINS]; s8 lna_gain; u32 mcu_gain; s16 temp_offset; u8 freq_offset; }; struct mt76x02_calibration { struct mt76x02_rx_freq_cal rx; u8 agc_gain_init[MT_MAX_CHAINS]; u8 agc_gain_cur[MT_MAX_CHAINS]; u16 false_cca; s8 avg_rssi_all; s8 agc_gain_adjust; s8 agc_lowest_gain; s8 low_gain; s8 temp_vco; s8 temp; bool init_cal_done; bool tssi_cal_done; bool tssi_comp_pending; bool dpd_cal_done; bool channel_cal_done; bool gain_init_done; int tssi_target; s8 tssi_dc; }; struct mt76x02_beacon_ops { unsigned int nslots; unsigned int slot_size; void (*pre_tbtt_enable)(struct mt76x02_dev *dev, bool en); void (*beacon_enable)(struct mt76x02_dev *dev, bool en); }; #define mt76x02_beacon_enable(dev, enable) \ (dev)->beacon_ops->beacon_enable(dev, enable) #define mt76x02_pre_tbtt_enable(dev, enable) \ (dev)->beacon_ops->pre_tbtt_enable(dev, enable) struct mt76x02_rate_power { union { struct { s8 cck[4]; s8 ofdm[8]; s8 ht[16]; s8 vht[2]; }; s8 all[30]; }; }; struct mt76x02_dev { union { /* must be first */ struct mt76_dev mt76; struct mt76_phy mphy; }; struct mac_address macaddr_list[8]; struct mutex phy_mutex; u8 txdone_seq; DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); spinlock_t txstatus_fifo_lock; u32 tx_airtime; u32 ampdu_ref; struct sk_buff *rx_head; struct delayed_work cal_work; struct delayed_work wdt_work; struct hrtimer pre_tbtt_timer; struct work_struct pre_tbtt_work; const struct mt76x02_beacon_ops *beacon_ops; u8 beacon_data_count; u8 tbtt_count; u32 tx_hang_reset; u8 tx_hang_check[4]; u8 beacon_hang_check; u8 mcu_timeout; struct mt76x02_rate_power rate_power; struct mt76x02_calibration cal; int txpower_conf; s8 target_power; s8 target_power_delta[2]; bool enable_tpc; bool no_2ghz; s16 coverage_class; u8 slottime; struct mt76x02_dfs_pattern_detector dfs_pd; /* edcca monitor */ unsigned long ed_trigger_timeout; 
bool ed_tx_blocked; bool ed_monitor; u8 ed_monitor_enabled; u8 ed_monitor_learning; u8 ed_trigger; u8 ed_silent; ktime_t ed_time; }; extern struct ieee80211_rate mt76x02_rates[12]; int mt76x02_init_device(struct mt76x02_dev *dev); void mt76x02_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev); int mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void mt76x02_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params); void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate); s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj); void mt76x02_wdt_work(struct work_struct *work); void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); void mt76x02_set_tx_ackto(struct mt76x02_dev *dev); void mt76x02_set_coverage_class(struct ieee80211_hw *hw, int radio_idx, s16 coverage_class); int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 val); void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update); void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info); void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance); void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); void mt76x02_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed); void mt76x02_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); struct beacon_bc_data { struct mt76x02_dev *dev; struct sk_buff_head q; struct sk_buff *tail[8]; }; void mt76x02_init_beacon_config(struct mt76x02_dev *dev); void mt76x02e_init_beacon_config(struct mt76x02_dev *dev); void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev); void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data, int max_nframes); void mt76x02_mac_start(struct mt76x02_dev *dev); void mt76x02_init_debugfs(struct mt76x02_dev *dev); static inline bool is_mt76x0(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7610 || mt76_chip(&dev->mt76) == 0x7630 || 
mt76_chip(&dev->mt76) == 0x7650; } static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || mt76_chip(&dev->mt76) == 0x7632 || mt76_chip(&dev->mt76) == 0x7662 || mt76_chip(&dev->mt76) == 0x7602; } static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask) { mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask); } static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask) { mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } static inline bool mt76x02_wait_for_txrx_idle(struct mt76_dev *dev) { return __mt76_poll_msec(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100); } static inline struct mt76x02_sta * mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx) { struct mt76_wcid *wcid; wcid = __mt76_wcid_ptr(dev, idx); if (!wcid) return NULL; return container_of(wcid, struct mt76x02_sta, wcid); } static inline struct mt76_wcid * mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast) { if (!sta) return NULL; if (unicast) return &sta->wcid; else return &sta->vif->group_wcid; } #endif /* __MT76x02_H */ |
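/*
 * Example (not from the header above): a minimal sketch of how the beacon_ops
 * indirection declared in this header is meant to be used. The my_bus_*
 * callbacks and the slot numbers are hypothetical; the real bus-specific
 * implementations live in the driver's PCI and USB backends.
 */
static void my_bus_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	/* arm or disarm the pre-TBTT mechanism for this bus */
}

static void my_bus_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	/* start or stop beacon transmission for this bus */
}

static const struct mt76x02_beacon_ops my_bus_beacon_ops = {
	.nslots		= 8,		/* illustrative values */
	.slot_size	= 1024,
	.pre_tbtt_enable = my_bus_pre_tbtt_enable,
	.beacon_enable	= my_bus_beacon_enable,
};

static void my_bus_start_beaconing(struct mt76x02_dev *dev)
{
	dev->beacon_ops = &my_bus_beacon_ops;

	/* Both macros simply dispatch through dev->beacon_ops. */
	mt76x02_pre_tbtt_enable(dev, true);
	mt76x02_beacon_enable(dev, true);
}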
/* SPDX-License-Identifier: GPL-2.0
 *
 *	page_pool/helpers.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for recycling pages or page fragments
 * used by skb packets and xdp frames.
 *
 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size.
 *
 * If the driver knows that it always requires full pages or its allocations
 * are always smaller than half a page, it can use one of the more specific
 * API calls:
 *
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when
 * the driver knows that the memory it needs is always bigger than half of the
 * page allocated from the page pool. There is no cache line dirtying for
 * 'struct page' when a page is recycled back to the page pool.
 *
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
 * driver knows that the memory it needs is always smaller than or equal to
 * half of the page allocated from the page pool. Page splitting enables
 * memory saving and thus avoids TLB/cache misses for data access, but there
 * is also some cost to implementing page splitting, mainly some cache line
 * dirtying/bouncing for 'struct page' and atomic operations for
 * page->pp_ref_count.
* * The API keeps track of in-flight pages, in order to let API users know when * it is safe to free a page_pool object, the API users must call * page_pool_put_page() or page_pool_free_va() to free the page_pool object, or * attach the page_pool object to a page_pool-aware object like skbs marked with * skb_mark_for_recycle(). * * page_pool_put_page() may be called multiple times on the same page if a page * is split into multiple fragments. For the last fragment, it will either * recycle the page, or in case of page->_refcount > 1, it will release the DMA * mapping and in-flight state accounting. * * dma_sync_single_range_for_device() is only called for the last fragment when * page_pool is created with PP_FLAG_DMA_SYNC_DEV flag, so it depends on the * last freed fragment to do the sync_for_device operation for all fragments in * the same page when a page is split. The API user must setup pool->p.max_len * and pool->p.offset correctly and ensure that page_pool_put_page() is called * with dma_sync_size being -1 for fragment API. */ #ifndef _NET_PAGE_POOL_HELPERS_H #define _NET_PAGE_POOL_HELPERS_H #include <linux/dma-mapping.h> #include <net/page_pool/types.h> #include <net/net_debug.h> #include <net/netmem.h> #ifdef CONFIG_PAGE_POOL_STATS /* Deprecated driver-facing API, use netlink instead */ int page_pool_ethtool_stats_get_count(void); u8 *page_pool_ethtool_stats_get_strings(u8 *data); u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats); bool page_pool_get_stats(const struct page_pool *pool, struct page_pool_stats *stats); #else static inline int page_pool_ethtool_stats_get_count(void) { return 0; } static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) { return data; } static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) { return data; } #endif /** * page_pool_dev_alloc_pages() - allocate a page. * @pool: pool from which to allocate * * Get a page from the page allocator or page_pool caches. */ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc_pages(pool, gfp); } /** * page_pool_dev_alloc_frag() - allocate a page fragment. * @pool: pool from which to allocate * @offset: offset to the allocated page * @size: requested size * * Get a page fragment from the page allocator or page_pool caches. * * Return: allocated page fragment, otherwise return NULL. */ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc_frag(pool, offset, size, gfp); } static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp) { unsigned int max_size = PAGE_SIZE << pool->p.order; netmem_ref netmem; if ((*size << 1) > max_size) { *size = max_size; *offset = 0; return page_pool_alloc_netmems(pool, gfp); } netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp); if (unlikely(!netmem)) return 0; /* There is very likely not enough space for another fragment, so append * the remaining size to the current fragment to avoid truesize * underestimate problem. 
*/ if (pool->frag_offset + *size > max_size) { *size = max_size - *offset; pool->frag_offset = max_size; } return netmem; } static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size) { gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; return page_pool_alloc_netmem(pool, offset, size, gfp); } static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool) { gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; return page_pool_alloc_netmems(pool, gfp); } static inline struct page *page_pool_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp) { return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp)); } /** * page_pool_dev_alloc() - allocate a page or a page fragment. * @pool: pool from which to allocate * @offset: offset to the allocated page * @size: in as the requested size, out as the allocated size * * Get a page or a page fragment from the page allocator or page_pool caches * depending on the requested size in order to allocate memory with least memory * utilization and performance penalty. * * Return: allocated page or page fragment, otherwise return NULL. */ static inline struct page *page_pool_dev_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc(pool, offset, size, gfp); } static inline void *page_pool_alloc_va(struct page_pool *pool, unsigned int *size, gfp_t gfp) { unsigned int offset; struct page *page; /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */ page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM); if (unlikely(!page)) return NULL; return page_address(page) + offset; } /** * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its * va. * @pool: pool from which to allocate * @size: in as the requested size, out as the allocated size * * This is just a thin wrapper around the page_pool_alloc() API, and * it returns va of the allocated page or page fragment. * * Return: the va for the allocated page or page fragment, otherwise return NULL. */ static inline void *page_pool_dev_alloc_va(struct page_pool *pool, unsigned int *size) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc_va(pool, size, gfp); } /** * page_pool_get_dma_dir() - Retrieve the stored DMA direction. * @pool: pool from which page was allocated * * Get the stored dma direction. A driver might decide to store this locally * and avoid the extra cache line from page_pool to determine the direction. */ static inline enum dma_data_direction page_pool_get_dma_dir(const struct page_pool *pool) { return pool->p.dma_dir; } static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr) { atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr); } /** * page_pool_fragment_page() - split a fresh page into fragments * @page: page to split * @nr: references to set * * pp_ref_count represents the number of outstanding references to the page, * which will be freed using page_pool APIs (rather than page allocator APIs * like put_page()). Such references are usually held by page_pool-aware * objects like skbs marked for page pool recycling. * * This helper allows the caller to take (set) multiple references to a * freshly allocated page. The page must be freshly allocated (have a * pp_ref_count of 1). 
This is commonly done by drivers and * "fragment allocators" to save atomic operations - either when they know * upfront how many references they will need; or to take MAX references and * return the unused ones with a single atomic dec(), instead of performing * multiple atomic inc() operations. */ static inline void page_pool_fragment_page(struct page *page, long nr) { page_pool_fragment_netmem(page_to_netmem(page), nr); } static inline long page_pool_unref_netmem(netmem_ref netmem, long nr) { atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem); long ret; /* If nr == pp_ref_count then we have cleared all remaining * references to the page: * 1. 'n == 1': no need to actually overwrite it. * 2. 'n != 1': overwrite it with one, which is the rare case * for pp_ref_count draining. * * The main advantage to doing this is that not only we avoid a atomic * update, as an atomic_read is generally a much cheaper operation than * an atomic update, especially when dealing with a page that may be * referenced by only 2 or 3 users; but also unify the pp_ref_count * handling by ensuring all pages have partitioned into only 1 piece * initially, and only overwrite it when the page is partitioned into * more than one piece. */ if (atomic_long_read(pp_ref_count) == nr) { /* As we have ensured nr is always one for constant case using * the BUILD_BUG_ON(), only need to handle the non-constant case * here for pp_ref_count draining, which is a rare case. */ BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1); if (!__builtin_constant_p(nr)) atomic_long_set(pp_ref_count, 1); return 0; } ret = atomic_long_sub_return(nr, pp_ref_count); WARN_ON(ret < 0); /* We are the last user here too, reset pp_ref_count back to 1 to * ensure all pages have been partitioned into 1 piece initially, * this should be the rare case when the last two fragment users call * page_pool_unref_page() currently. */ if (unlikely(!ret)) atomic_long_set(pp_ref_count, 1); return ret; } static inline long page_pool_unref_page(struct page *page, long nr) { return page_pool_unref_netmem(page_to_netmem(page), nr); } static inline void page_pool_ref_netmem(netmem_ref netmem) { atomic_long_inc(netmem_get_pp_ref_count_ref(netmem)); } static inline void page_pool_ref_page(struct page *page) { page_pool_ref_netmem(page_to_netmem(page)); } static inline bool page_pool_unref_and_test(netmem_ref netmem) { /* If page_pool_unref_page() returns 0, we were the last user */ return page_pool_unref_netmem(netmem, 1) == 0; } static inline void page_pool_put_netmem(struct page_pool *pool, netmem_ref netmem, unsigned int dma_sync_size, bool allow_direct) { /* When page_pool isn't compiled-in, net/core/xdp.c doesn't * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL if (!page_pool_unref_and_test(netmem)) return; page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct); #endif } /** * page_pool_put_page() - release a reference to a page pool page * @pool: pool from which page was allocated * @page: page to release a reference on * @dma_sync_size: how much of the page may have been touched by the device * @allow_direct: released by the consumer, allow lockless caching * * The outcome of this depends on the page refcnt. If the driver bumps * the refcnt > 1 this will unmap the page. If the page refcnt is 1 * the allocator owns the page and will try to recycle it in one of the pool * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device * using dma_sync_single_range_for_device(). 
*/ static inline void page_pool_put_page(struct page_pool *pool, struct page *page, unsigned int dma_sync_size, bool allow_direct) { page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size, allow_direct); } static inline void page_pool_put_full_netmem(struct page_pool *pool, netmem_ref netmem, bool allow_direct) { page_pool_put_netmem(pool, netmem, -1, allow_direct); } /** * page_pool_put_full_page() - release a reference on a page pool page * @pool: pool from which page was allocated * @page: page to release a reference on * @allow_direct: released by the consumer, allow lockless caching * * Similar to page_pool_put_page(), but will DMA sync the entire memory area * as configured in &page_pool_params.max_len. */ static inline void page_pool_put_full_page(struct page_pool *pool, struct page *page, bool allow_direct) { page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct); } /** * page_pool_recycle_direct() - release a reference on a page pool page * @pool: pool from which page was allocated * @page: page to release a reference on * * Similar to page_pool_put_full_page() but caller must guarantee safe context * (e.g NAPI), since it will recycle the page directly into the pool fast cache. */ static inline void page_pool_recycle_direct(struct page_pool *pool, struct page *page) { page_pool_put_full_page(pool, page, true); } static inline void page_pool_recycle_direct_netmem(struct page_pool *pool, netmem_ref netmem) { page_pool_put_full_netmem(pool, netmem, true); } #define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \ (sizeof(dma_addr_t) > sizeof(unsigned long)) /** * page_pool_free_va() - free a va into the page_pool * @pool: pool from which va was allocated * @va: va to be freed * @allow_direct: freed by the consumer, allow lockless caching * * Free a va allocated from page_pool_allo_va(). */ static inline void page_pool_free_va(struct page_pool *pool, void *va, bool allow_direct) { page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct); } static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem) { dma_addr_t ret = netmem_get_dma_addr(netmem); if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) ret <<= PAGE_SHIFT; return ret; } /** * page_pool_get_dma_addr() - Retrieve the stored DMA address. * @page: page allocated from a page pool * * Fetch the DMA address of the page. The page pool to which the page belongs * must had been created with PP_FLAG_DMA_MAP. */ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) { return page_pool_get_dma_addr_netmem(page_to_netmem(page)); } static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool, const dma_addr_t dma_addr, u32 offset, u32 dma_sync_size) { dma_sync_single_range_for_cpu(pool->p.dev, dma_addr, offset + pool->p.offset, dma_sync_size, page_pool_get_dma_dir(pool)); } /** * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW * @pool: &page_pool the @page belongs to * @page: page to sync * @offset: offset from page start to "hard" start if using PP frags * @dma_sync_size: size of the data written to the page * * Can be used as a shorthand to sync Rx pages before accessing them in the * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``. * Note that this version performs DMA sync unconditionally, even if the * associated PP doesn't perform sync-for-device. 
*/ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, const struct page *page, u32 offset, u32 dma_sync_size) { __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset, dma_sync_size); } static inline void page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool, const netmem_ref netmem, u32 offset, u32 dma_sync_size) { if (!pool->dma_sync_for_cpu) return; __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr_netmem(netmem), offset, dma_sync_size); } static inline void page_pool_get(struct page_pool *pool) { refcount_inc(&pool->user_cnt); } static inline bool page_pool_put(struct page_pool *pool) { return refcount_dec_and_test(&pool->user_cnt); } static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) { if (unlikely(pool->p.nid != new_nid)) page_pool_update_nid(pool, new_nid); } /** * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU * @pool: queried page pool * * Check if page pool will return buffers which are unreadable to the CPU / * kernel. This will only be the case if user space bound a memory provider (mp) * which returns unreadable memory to the queue served by the page pool. * If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp bound * this helper will return false. See also netif_rxq_has_unreadable_mp(). * * Return: true if memory allocated by the page pool may be unreadable */ static inline bool page_pool_is_unreadable(struct page_pool *pool) { return !!pool->mp_ops; } #endif /* _NET_PAGE_POOL_HELPERS_H */ |
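/*
 * Example (not part of the header above): a minimal Rx-ring sketch showing
 * the intended call flow: create a pool, allocate pages for descriptors,
 * hand their DMA addresses to hardware, and release pages back through the
 * page_pool helpers. The mydrv_* names and the descriptor layout are
 * hypothetical; error handling is reduced to the essentials.
 */
#include <net/page_pool/helpers.h>

static struct page_pool *mydrv_create_pool(struct device *dev, int ring_size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};

	/* Returns ERR_PTR() on failure; page_pool_destroy() tears it down. */
	return page_pool_create(&pp);
}

static int mydrv_refill_rx_slot(struct page_pool *pool,
				struct mydrv_rx_desc *desc)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(pool);
	if (!page)
		return -ENOMEM;

	/* The pool was created with PP_FLAG_DMA_MAP, so a mapping exists. */
	desc->dma_addr = page_pool_get_dma_addr(page);
	desc->page = page;
	return 0;
}

static void mydrv_drop_rx_slot(struct page_pool *pool,
			       struct mydrv_rx_desc *desc)
{
	/* Sync up to max_len for the device and recycle or unmap the page. */
	page_pool_put_full_page(pool, desc->page, false);
	desc->page = NULL;
}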
// SPDX-License-Identifier: GPL-2.0+
/*
 * USB IR Dongle driver
 *
 * Copyright (C) 2001-2002	Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2002		Gary Brubaker (xavyer@ix.netcom.com)
 * Copyright (C) 2010		Johan Hovold (jhovold@gmail.com)
 *
 * This driver allows a USB IrDA device to be used as a "dumb" serial device.
 * This can be useful if you do not have access to a full IrDA stack on the
 * other side of the connection. If you do have an IrDA stack on both devices,
 * please use the usb-irda driver, as it contains the proper error checking and
 * other goodness of a full IrDA stack.
* * Portions of this driver were taken from drivers/net/irda/irda-usb.c, which * was written by Roman Weissgaerber <weissg@vienna.at>, Dag Brattli * <dag@brattli.net>, and Jean Tourrilhes <jt@hpl.hp.com> * * See Documentation/usb/usb-serial.rst for more information on using this * driver */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/usb/irda.h> #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Johan Hovold <jhovold@gmail.com>" #define DRIVER_DESC "USB IR Dongle driver" /* if overridden by the user, then use their value for the size of the read and * write urbs */ static int buffer_size; /* if overridden by the user, then use the specified number of XBOFs */ static int xbof = -1; static int ir_startup (struct usb_serial *serial); static int ir_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static unsigned int ir_write_room(struct tty_struct *tty); static void ir_write_bulk_callback(struct urb *urb); static void ir_process_read_urb(struct urb *urb); static void ir_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); /* Not that this lot means you can only have one per system */ static u8 ir_baud; static u8 ir_xbof; static u8 ir_add_bof; static const struct usb_device_id ir_id_table[] = { { USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */ { USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */ { USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */ { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, USB_SUBCLASS_IRDA, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ir_id_table); static struct usb_serial_driver ir_device = { .driver = { .name = "ir-usb", }, .description = "IR Dongle", .id_table = ir_id_table, .num_ports = 1, .num_bulk_in = 1, .num_bulk_out = 1, .set_termios = ir_set_termios, .attach = ir_startup, .write = ir_write, .write_room = ir_write_room, .write_bulk_callback = ir_write_bulk_callback, .process_read_urb = ir_process_read_urb, }; static struct usb_serial_driver * const serial_drivers[] = { &ir_device, NULL }; static inline void irda_usb_dump_class_desc(struct usb_serial *serial, struct usb_irda_cs_descriptor *desc) { struct device *dev = &serial->dev->dev; dev_dbg(dev, "bLength=%x\n", desc->bLength); dev_dbg(dev, "bDescriptorType=%x\n", desc->bDescriptorType); dev_dbg(dev, "bcdSpecRevision=%x\n", __le16_to_cpu(desc->bcdSpecRevision)); dev_dbg(dev, "bmDataSize=%x\n", desc->bmDataSize); dev_dbg(dev, "bmWindowSize=%x\n", desc->bmWindowSize); dev_dbg(dev, "bmMinTurnaroundTime=%d\n", desc->bmMinTurnaroundTime); dev_dbg(dev, "wBaudRate=%x\n", __le16_to_cpu(desc->wBaudRate)); dev_dbg(dev, "bmAdditionalBOFs=%x\n", desc->bmAdditionalBOFs); dev_dbg(dev, "bIrdaRateSniff=%x\n", desc->bIrdaRateSniff); dev_dbg(dev, "bMaxUnicastList=%x\n", desc->bMaxUnicastList); } /*------------------------------------------------------------------*/ /* * Function irda_usb_find_class_desc(dev, ifnum) * * Returns instance of IrDA class descriptor, or NULL if not found * * The class descriptor is some extra info that IrDA USB devices will * offer to us, describing their IrDA characteristics. 
We will use that in * irda_usb_init_qos() * * Based on the same function in drivers/net/irda/irda-usb.c */ static struct usb_irda_cs_descriptor * irda_usb_find_class_desc(struct usb_serial *serial, unsigned int ifnum) { struct usb_device *dev = serial->dev; struct usb_irda_cs_descriptor *desc; int ret; desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return NULL; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_CS_IRDA_GET_CLASS_DESC, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, ifnum, desc, sizeof(*desc), 1000); dev_dbg(&serial->dev->dev, "%s - ret=%d\n", __func__, ret); if (ret < (int)sizeof(*desc)) { dev_dbg(&serial->dev->dev, "%s - class descriptor read %s (%d)\n", __func__, (ret < 0) ? "failed" : "too short", ret); goto error; } if (desc->bDescriptorType != USB_DT_CS_IRDA) { dev_dbg(&serial->dev->dev, "%s - bad class descriptor type\n", __func__); goto error; } irda_usb_dump_class_desc(serial, desc); return desc; error: kfree(desc); return NULL; } static u8 ir_xbof_change(u8 xbof) { u8 result; /* reference irda-usb.c */ switch (xbof) { case 48: result = 0x10; break; case 28: case 24: result = 0x20; break; default: case 12: result = 0x30; break; case 5: case 6: result = 0x40; break; case 3: result = 0x50; break; case 2: result = 0x60; break; case 1: result = 0x70; break; case 0: result = 0x80; break; } return(result); } static int ir_startup(struct usb_serial *serial) { struct usb_irda_cs_descriptor *irda_desc; int rates; irda_desc = irda_usb_find_class_desc(serial, 0); if (!irda_desc) { dev_err(&serial->dev->dev, "IRDA class descriptor not found, device not bound\n"); return -ENODEV; } rates = le16_to_cpu(irda_desc->wBaudRate); dev_dbg(&serial->dev->dev, "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", __func__, (rates & USB_IRDA_BR_2400) ? " 2400" : "", (rates & USB_IRDA_BR_9600) ? " 9600" : "", (rates & USB_IRDA_BR_19200) ? " 19200" : "", (rates & USB_IRDA_BR_38400) ? " 38400" : "", (rates & USB_IRDA_BR_57600) ? " 57600" : "", (rates & USB_IRDA_BR_115200) ? " 115200" : "", (rates & USB_IRDA_BR_576000) ? " 576000" : "", (rates & USB_IRDA_BR_1152000) ? " 1152000" : "", (rates & USB_IRDA_BR_4000000) ? " 4000000" : ""); switch (irda_desc->bmAdditionalBOFs) { case USB_IRDA_AB_48: ir_add_bof = 48; break; case USB_IRDA_AB_24: ir_add_bof = 24; break; case USB_IRDA_AB_12: ir_add_bof = 12; break; case USB_IRDA_AB_6: ir_add_bof = 6; break; case USB_IRDA_AB_3: ir_add_bof = 3; break; case USB_IRDA_AB_2: ir_add_bof = 2; break; case USB_IRDA_AB_1: ir_add_bof = 1; break; case USB_IRDA_AB_0: ir_add_bof = 0; break; default: break; } kfree(irda_desc); return 0; } static int ir_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct urb *urb = NULL; unsigned long flags; int ret; if (port->bulk_out_size == 0) return -EINVAL; if (count == 0) return 0; count = min(count, port->bulk_out_size - 1); spin_lock_irqsave(&port->lock, flags); if (__test_and_clear_bit(0, &port->write_urbs_free)) { urb = port->write_urbs[0]; port->tx_bytes += count; } spin_unlock_irqrestore(&port->lock, flags); if (!urb) return 0; /* * The first byte of the packet we send to the device contains an * outbound header which indicates an additional number of BOFs and * a baud rate change. * * See section 5.4.2.2 of the USB IrDA spec. 
*/ *(u8 *)urb->transfer_buffer = ir_xbof | ir_baud; memcpy(urb->transfer_buffer + 1, buf, count); urb->transfer_buffer_length = count + 1; urb->transfer_flags = URB_ZERO_PACKET; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { dev_err(&port->dev, "failed to submit write urb: %d\n", ret); spin_lock_irqsave(&port->lock, flags); __set_bit(0, &port->write_urbs_free); port->tx_bytes -= count; spin_unlock_irqrestore(&port->lock, flags); return ret; } return count; } static void ir_write_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; int status = urb->status; unsigned long flags; spin_lock_irqsave(&port->lock, flags); __set_bit(0, &port->write_urbs_free); port->tx_bytes -= urb->transfer_buffer_length - 1; spin_unlock_irqrestore(&port->lock, flags); switch (status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: dev_dbg(&port->dev, "write urb stopped: %d\n", status); return; case -EPIPE: dev_err(&port->dev, "write urb stopped: %d\n", status); return; default: dev_err(&port->dev, "nonzero write-urb status: %d\n", status); break; } usb_serial_port_softint(port); } static unsigned int ir_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned int count = 0; if (port->bulk_out_size == 0) return 0; if (test_bit(0, &port->write_urbs_free)) count = port->bulk_out_size - 1; return count; } static void ir_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; if (!urb->actual_length) return; /* * The first byte of the packet we get from the device * contains a busy indicator and baud rate change. * See section 5.4.1.2 of the USB IrDA spec. */ if (*data & 0x0f) ir_baud = *data & 0x0f; if (urb->actual_length == 1) return; tty_insert_flip_string(&port->port, data + 1, urb->actual_length - 1); tty_flip_buffer_push(&port->port); } static void ir_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_device *udev = port->serial->dev; unsigned char *transfer_buffer; int actual_length; speed_t baud; int ir_baud; int ret; baud = tty_get_baud_rate(tty); /* * FIXME, we should compare the baud request against the * capability stated in the IR header that we got in the * startup function. 
*/ switch (baud) { case 2400: ir_baud = USB_IRDA_LS_2400; break; case 9600: ir_baud = USB_IRDA_LS_9600; break; case 19200: ir_baud = USB_IRDA_LS_19200; break; case 38400: ir_baud = USB_IRDA_LS_38400; break; case 57600: ir_baud = USB_IRDA_LS_57600; break; case 115200: ir_baud = USB_IRDA_LS_115200; break; case 576000: ir_baud = USB_IRDA_LS_576000; break; case 1152000: ir_baud = USB_IRDA_LS_1152000; break; case 4000000: ir_baud = USB_IRDA_LS_4000000; break; default: ir_baud = USB_IRDA_LS_9600; baud = 9600; } if (xbof == -1) ir_xbof = ir_xbof_change(ir_add_bof); else ir_xbof = ir_xbof_change(xbof) ; /* Only speed changes are supported */ tty_termios_copy_hw(&tty->termios, old_termios); tty_encode_baud_rate(tty, baud, baud); /* * send the baud change out on an "empty" data packet */ transfer_buffer = kmalloc(1, GFP_KERNEL); if (!transfer_buffer) return; *transfer_buffer = ir_xbof | ir_baud; ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, port->bulk_out_endpointAddress), transfer_buffer, 1, &actual_length, 5000); if (ret || actual_length != 1) { if (!ret) ret = -EIO; dev_err(&port->dev, "failed to change line speed: %d\n", ret); } kfree(transfer_buffer); } static int __init ir_init(void) { if (buffer_size) { ir_device.bulk_in_size = buffer_size; ir_device.bulk_out_size = buffer_size; } return usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, ir_id_table); } static void __exit ir_exit(void) { usb_serial_deregister_drivers(serial_drivers); } module_init(ir_init); module_exit(ir_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(xbof, int, 0); MODULE_PARM_DESC(xbof, "Force specific number of XBOFs"); module_param(buffer_size, int, 0); MODULE_PARM_DESC(buffer_size, "Size of the transfer buffers"); |
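/*
 * Example (not part of the driver above): how the one-byte outbound header
 * described in section 5.4.2.2 of the USB IrDA bridge spec is composed for a
 * concrete setting, here 19200 baud with 12 additional BOFs. The values
 * follow ir_xbof_change() and the USB_IRDA_LS_* constants used above; the
 * function name is only for illustration.
 */
#include <linux/usb/irda.h>

static u8 example_outbound_header(void)
{
	u8 xbof = 0x30;			/* ir_xbof_change(12) */
	u8 baud = USB_IRDA_LS_19200;	/* requested link speed */

	/*
	 * The upper nibble selects the number of extra BOFs, the lower nibble
	 * the baud rate; the device applies both when it sees this header in
	 * front of the payload.
	 */
	return xbof | baud;
}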
| 2 2 2 2 2 2 2 2 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 | // SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC driver: major functions * * Copyright (C) 2014-2015 Marvell International Ltd. */ #include <linux/module.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/of_gpio.h> #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" static int nfcmrvl_nci_open(struct nci_dev *ndev) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); int err; if (test_and_set_bit(NFCMRVL_NCI_RUNNING, &priv->flags)) return 0; /* Reset possible fault of previous session */ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); err = priv->if_ops->nci_open(priv); if (err) clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags); return err; } static int nfcmrvl_nci_close(struct nci_dev *ndev) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); if (!test_and_clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags)) return 0; priv->if_ops->nci_close(priv); return 0; } static int nfcmrvl_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); nfc_info(priv->dev, "send entry, len %d\n", skb->len); skb->dev = (void *)ndev; if (priv->config.hci_muxed) { unsigned char *hdr; unsigned char len = skb->len; hdr = skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE); hdr[0] = NFCMRVL_HCI_COMMAND_CODE; hdr[1] = NFCMRVL_HCI_OGF; hdr[2] = NFCMRVL_HCI_OCF; hdr[3] = len; } return priv->if_ops->nci_send(priv, skb); } static int nfcmrvl_nci_setup(struct nci_dev *ndev) { __u8 val = 1; nci_set_config(ndev, NFCMRVL_PB_BAIL_OUT, 1, &val); return 0; } static int nfcmrvl_nci_fw_download(struct nci_dev *ndev, const char *firmware_name) { return nfcmrvl_fw_dnld_start(ndev, firmware_name); } static const struct nci_ops nfcmrvl_nci_ops = { .open = nfcmrvl_nci_open, .close = nfcmrvl_nci_close, .send = nfcmrvl_nci_send, .setup = nfcmrvl_nci_setup, .fw_download = nfcmrvl_nci_fw_download, }; struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, void *drv_data, const struct nfcmrvl_if_ops *ops, struct device *dev, const struct nfcmrvl_platform_data *pdata) { struct nfcmrvl_private *priv; int rc; int headroom; int tailroom; u32 protocols; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); priv->drv_data = drv_data; priv->if_ops = ops; priv->dev = dev; priv->phy = phy; memcpy(&priv->config, pdata, sizeof(*pdata)); if (gpio_is_valid(priv->config.reset_n_io)) { rc = gpio_request_one(priv->config.reset_n_io, GPIOF_OUT_INIT_LOW, "nfcmrvl_reset_n"); if (rc < 0) { priv->config.reset_n_io = 
-EINVAL; nfc_err(dev, "failed to request reset_n io\n"); } } if (phy == NFCMRVL_PHY_SPI) { headroom = NCI_SPI_HDR_LEN; tailroom = 1; } else headroom = tailroom = 0; if (priv->config.hci_muxed) headroom += NFCMRVL_HCI_EVENT_HEADER_SIZE; protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK; priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, headroom, tailroom); if (!priv->ndev) { nfc_err(dev, "nci_allocate_device failed\n"); rc = -ENOMEM; goto error_free_gpio; } rc = nfcmrvl_fw_dnld_init(priv); if (rc) { nfc_err(dev, "failed to initialize FW download %d\n", rc); goto error_free_dev; } nci_set_drvdata(priv->ndev, priv); rc = nci_register_device(priv->ndev); if (rc) { nfc_err(dev, "nci_register_device failed %d\n", rc); goto error_fw_dnld_deinit; } /* Ensure that controller is powered off */ nfcmrvl_chip_halt(priv); nfc_info(dev, "registered with nci successfully\n"); return priv; error_fw_dnld_deinit: nfcmrvl_fw_dnld_deinit(priv); error_free_dev: nci_free_device(priv->ndev); error_free_gpio: if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); kfree(priv); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(nfcmrvl_nci_register_dev); void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) { struct nci_dev *ndev = priv->ndev; nci_unregister_device(ndev); if (priv->ndev->nfc_dev->fw_download_in_progress) nfcmrvl_fw_dnld_abort(priv); nfcmrvl_fw_dnld_deinit(priv); if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); nci_free_device(ndev); kfree(priv); } EXPORT_SYMBOL_GPL(nfcmrvl_nci_unregister_dev); int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb) { if (priv->config.hci_muxed) { if (skb->data[0] == NFCMRVL_HCI_EVENT_CODE && skb->data[1] == NFCMRVL_HCI_NFC_EVENT_CODE) { /* Data packet, let's extract NCI payload */ skb_pull(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE); } else { /* Skip this packet */ kfree_skb(skb); return 0; } } if (priv->ndev->nfc_dev->fw_download_in_progress) { nfcmrvl_fw_dnld_recv_frame(priv, skb); return 0; } if (test_bit(NFCMRVL_NCI_RUNNING, &priv->flags)) nci_recv_frame(priv->ndev, skb); else { /* Drop this packet since nobody wants it */ kfree_skb(skb); return 0; } return 0; } EXPORT_SYMBOL_GPL(nfcmrvl_nci_recv_frame); void nfcmrvl_chip_reset(struct nfcmrvl_private *priv) { /* Reset possible fault of previous session */ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); if (gpio_is_valid(priv->config.reset_n_io)) { nfc_info(priv->dev, "reset the chip\n"); gpio_set_value(priv->config.reset_n_io, 0); usleep_range(5000, 10000); gpio_set_value(priv->config.reset_n_io, 1); } else nfc_info(priv->dev, "no reset available on this interface\n"); } void nfcmrvl_chip_halt(struct nfcmrvl_private *priv) { if (gpio_is_valid(priv->config.reset_n_io)) gpio_set_value(priv->config.reset_n_io, 0); } int nfcmrvl_parse_dt(struct device_node *node, struct nfcmrvl_platform_data *pdata) { int reset_n_io; reset_n_io = of_get_named_gpio(node, "reset-n-io", 0); if (reset_n_io < 0) { pr_info("no reset-n-io config\n"); } else if (!gpio_is_valid(reset_n_io)) { pr_err("invalid reset-n-io GPIO\n"); return reset_n_io; } pdata->reset_n_io = reset_n_io; pdata->hci_muxed = of_property_read_bool(node, "hci-muxed"); return 0; } EXPORT_SYMBOL_GPL(nfcmrvl_parse_dt); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC driver"); MODULE_LICENSE("GPL v2"); |
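/*
 * Example (not part of the file above): a minimal sketch of how a physical
 * interface driver would hook into nfcmrvl_nci_register_dev(). The my_phy_*
 * callbacks and the my_phy_data type are hypothetical; the real
 * implementations live in the UART/I2C/SPI/USB phy drivers.
 */
#include "nfcmrvl.h"

static int my_phy_nci_open(struct nfcmrvl_private *priv)
{
	/* power up / claim the underlying transport */
	return 0;
}

static void my_phy_nci_close(struct nfcmrvl_private *priv)
{
	/* release the transport */
}

static int my_phy_nci_send(struct nfcmrvl_private *priv, struct sk_buff *skb)
{
	/* push the (possibly HCI-framed) packet out on the wire */
	return 0;
}

static const struct nfcmrvl_if_ops my_phy_ops = {
	.nci_open	= my_phy_nci_open,
	.nci_close	= my_phy_nci_close,
	.nci_send	= my_phy_nci_send,
};

static int my_phy_probe(struct device *dev, struct my_phy_data *drv_data,
			const struct nfcmrvl_platform_data *pdata)
{
	struct nfcmrvl_private *priv;

	priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_SPI, drv_data,
					&my_phy_ops, dev, pdata);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	drv_data->priv = priv;
	return 0;
}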
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux I2C core ACPI support code
 *
 * Copyright (C) 2014 Intel Corp, Author: Lan Tianyu <tianyu.lan@intel.com>
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include
<linux/err.h> #include <linux/i2c.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include "i2c-core.h" struct i2c_acpi_handler_data { struct acpi_connection_info info; struct i2c_adapter *adapter; }; struct gsb_buffer { u8 status; u8 len; union { u16 wdata; u8 bdata; DECLARE_FLEX_ARRAY(u8, data); }; } __packed; struct i2c_acpi_lookup { struct i2c_board_info *info; acpi_handle adapter_handle; acpi_handle device_handle; acpi_handle search_handle; int n; int index; u32 speed; u32 min_speed; u32 force_speed; }; /** * i2c_acpi_get_i2c_resource - Gets I2cSerialBus resource if type matches * @ares: ACPI resource * @i2c: Pointer to I2cSerialBus resource will be returned here * * Checks if the given ACPI resource is of type I2cSerialBus. * In this case, returns a pointer to it to the caller. * * Returns true if resource type is of I2cSerialBus, otherwise false. */ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c) { struct acpi_resource_i2c_serialbus *sb; if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) return false; sb = &ares->data.i2c_serial_bus; if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) return false; *i2c = sb; return true; } EXPORT_SYMBOL_GPL(i2c_acpi_get_i2c_resource); static int i2c_acpi_resource_count(struct acpi_resource *ares, void *data) { struct acpi_resource_i2c_serialbus *sb; int *count = data; if (i2c_acpi_get_i2c_resource(ares, &sb)) *count = *count + 1; return 1; } /** * i2c_acpi_client_count - Count the number of I2cSerialBus resources * @adev: ACPI device * * Returns the number of I2cSerialBus resources in the ACPI-device's * resource-list; or a negative error code. */ int i2c_acpi_client_count(struct acpi_device *adev) { int ret, count = 0; LIST_HEAD(r); ret = acpi_dev_get_resources(adev, &r, i2c_acpi_resource_count, &count); if (ret < 0) return ret; acpi_dev_free_resource_list(&r); return count; } EXPORT_SYMBOL_GPL(i2c_acpi_client_count); static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data) { struct i2c_acpi_lookup *lookup = data; struct i2c_board_info *info = lookup->info; struct acpi_resource_i2c_serialbus *sb; acpi_status status; if (info->addr || !i2c_acpi_get_i2c_resource(ares, &sb)) return 1; if (lookup->index != -1 && lookup->n++ != lookup->index) return 1; status = acpi_get_handle(lookup->device_handle, sb->resource_source.string_ptr, &lookup->adapter_handle); if (ACPI_FAILURE(status)) return 1; info->addr = sb->slave_address; lookup->speed = sb->connection_speed; if (sb->access_mode == ACPI_I2C_10BIT_MODE) info->flags |= I2C_CLIENT_TEN; return 1; } static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = { /* * ACPI video acpi_devices, which are handled by the acpi-video driver * sometimes contain a SERIAL_TYPE_I2C ACPI resource, ignore these. 
*/ { ACPI_VIDEO_HID, 0 }, {} }; struct i2c_acpi_irq_context { int irq; bool wake_capable; }; static int i2c_acpi_do_lookup(struct acpi_device *adev, struct i2c_acpi_lookup *lookup) { struct i2c_board_info *info = lookup->info; struct list_head resource_list; int ret; if (acpi_bus_get_status(adev)) return -EINVAL; if (!acpi_dev_ready_for_enumeration(adev)) return -ENODEV; if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) return -ENODEV; memset(info, 0, sizeof(*info)); lookup->device_handle = acpi_device_handle(adev); /* Look up for I2cSerialBus resource */ INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, i2c_acpi_fill_info, lookup); acpi_dev_free_resource_list(&resource_list); if (ret < 0 || !info->addr) return -EINVAL; return 0; } static int i2c_acpi_add_irq_resource(struct acpi_resource *ares, void *data) { struct i2c_acpi_irq_context *irq_ctx = data; struct resource r; if (irq_ctx->irq > 0) return 1; if (!acpi_dev_resource_interrupt(ares, 0, &r)) return 1; irq_ctx->irq = i2c_dev_irq_from_resources(&r, 1); irq_ctx->wake_capable = r.flags & IORESOURCE_IRQ_WAKECAPABLE; return 1; /* No need to add resource to the list */ } /** * i2c_acpi_get_irq - get device IRQ number from ACPI * @client: Pointer to the I2C client device * @wake_capable: Set to true if the IRQ is wake capable * * Find the IRQ number used by a specific client device. * * Return: The IRQ number or an error code. */ int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable) { struct acpi_device *adev = ACPI_COMPANION(&client->dev); struct list_head resource_list; struct i2c_acpi_irq_context irq_ctx = { .irq = -ENOENT, }; int ret; INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, i2c_acpi_add_irq_resource, &irq_ctx); if (ret < 0) return ret; acpi_dev_free_resource_list(&resource_list); if (irq_ctx.irq == -ENOENT) irq_ctx.irq = acpi_dev_gpio_irq_wake_get(adev, 0, &irq_ctx.wake_capable); if (irq_ctx.irq < 0) return irq_ctx.irq; if (wake_capable) *wake_capable = irq_ctx.wake_capable; return irq_ctx.irq; } static int i2c_acpi_get_info(struct acpi_device *adev, struct i2c_board_info *info, struct i2c_adapter *adapter, acpi_handle *adapter_handle) { struct i2c_acpi_lookup lookup; int ret; memset(&lookup, 0, sizeof(lookup)); lookup.info = info; lookup.index = -1; if (acpi_device_enumerated(adev)) return -EINVAL; ret = i2c_acpi_do_lookup(adev, &lookup); if (ret) return ret; if (adapter) { /* The adapter must match the one in I2cSerialBus() connector */ if (!device_match_acpi_handle(&adapter->dev, lookup.adapter_handle)) return -ENODEV; } else { struct acpi_device *adapter_adev; /* The adapter must be present */ adapter_adev = acpi_fetch_acpi_dev(lookup.adapter_handle); if (!adapter_adev) return -ENODEV; if (acpi_bus_get_status(adapter_adev) || !adapter_adev->status.present) return -ENODEV; } info->fwnode = acpi_fwnode_handle(adev); if (adapter_handle) *adapter_handle = lookup.adapter_handle; acpi_set_modalias(adev, dev_name(&adev->dev), info->type, sizeof(info->type)); return 0; } static void i2c_acpi_register_device(struct i2c_adapter *adapter, struct acpi_device *adev, struct i2c_board_info *info) { /* * Skip registration on boards where the ACPI tables are * known to contain bogus I2C devices. 
*/ if (acpi_quirk_skip_i2c_client_enumeration(adev)) return; adev->power.flags.ignore_parent = true; acpi_device_set_enumerated(adev); if (IS_ERR(i2c_new_client_device(adapter, info))) adev->power.flags.ignore_parent = false; } static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { struct i2c_adapter *adapter = data; struct acpi_device *adev = acpi_fetch_acpi_dev(handle); struct i2c_board_info info; if (!adev || i2c_acpi_get_info(adev, &info, adapter, NULL)) return AE_OK; i2c_acpi_register_device(adapter, adev, &info); return AE_OK; } #define I2C_ACPI_MAX_SCAN_DEPTH 32 /** * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter * @adap: pointer to adapter * * Enumerate all I2C slave devices behind this adapter by walking the ACPI * namespace. When a device is found it will be added to the Linux device * model and bound to the corresponding ACPI handle. */ void i2c_acpi_register_devices(struct i2c_adapter *adap) { struct acpi_device *adev; acpi_status status; if (!has_acpi_companion(&adap->dev)) return; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, I2C_ACPI_MAX_SCAN_DEPTH, i2c_acpi_add_device, NULL, adap, NULL); if (ACPI_FAILURE(status)) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); if (!adap->dev.parent) return; adev = ACPI_COMPANION(adap->dev.parent); if (!adev) return; acpi_dev_clear_dependencies(adev); } static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { /* * These Silead touchscreen controllers only work at 400KHz, for * some reason they do not work at 100KHz. On some devices the ACPI * tables list another device at their bus as only being capable * of 100KHz, testing has shown that these other devices work fine * at 400KHz (as can be expected of any recent i2c hw) so we force * the speed of the bus to 400 KHz if a Silead device is present. */ { "MSSL1680", 0 }, {} }; static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = { /* * When a 400KHz freq is used on this model of ELAN touchpad in Linux, * excessive smoothing (similar to when the touchpad's firmware detects * a noisy signal) is sometimes applied. As some devices' (e.g, Lenovo * V15 G4) ACPI tables specify a 400KHz frequency for this device and * some I2C busses (e.g, Designware I2C) default to a 400KHz freq, * force the speed to 100KHz as a workaround. * * For future investigation: This problem may be related to the default * HCNT/LCNT values given by some busses' drivers, because they are not * specified in the aforementioned devices' ACPI tables, and because * the device works without issues on Windows at what is expected to be * a 400KHz frequency. The root cause of the issue is not known. 
*/ { "DLL0945", 0 }, { "ELAN06FA", 0 }, {} }; static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { struct i2c_acpi_lookup *lookup = data; struct acpi_device *adev = acpi_fetch_acpi_dev(handle); if (!adev || i2c_acpi_do_lookup(adev, lookup)) return AE_OK; if (lookup->search_handle != lookup->adapter_handle) return AE_OK; if (lookup->speed <= lookup->min_speed) lookup->min_speed = lookup->speed; if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0) lookup->force_speed = I2C_MAX_FAST_MODE_FREQ; if (acpi_match_device_ids(adev, i2c_acpi_force_100khz_device_ids) == 0) lookup->force_speed = I2C_MAX_STANDARD_MODE_FREQ; return AE_OK; } /** * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI * @dev: The device owning the bus * * Find the I2C bus speed by walking the ACPI namespace for all I2C slaves * devices connected to this bus and use the speed of slowest device. * * Returns the speed in Hz or zero */ u32 i2c_acpi_find_bus_speed(struct device *dev) { struct i2c_acpi_lookup lookup; struct i2c_board_info dummy; acpi_status status; if (!has_acpi_companion(dev)) return 0; memset(&lookup, 0, sizeof(lookup)); lookup.search_handle = ACPI_HANDLE(dev); lookup.min_speed = UINT_MAX; lookup.info = &dummy; lookup.index = -1; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, I2C_ACPI_MAX_SCAN_DEPTH, i2c_acpi_lookup_speed, NULL, &lookup, NULL); if (ACPI_FAILURE(status)) { dev_warn(dev, "unable to find I2C bus speed from ACPI\n"); return 0; } if (lookup.force_speed) { if (lookup.force_speed != lookup.min_speed) dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n", lookup.min_speed, lookup.force_speed); return lookup.force_speed; } else if (lookup.min_speed != UINT_MAX) { return lookup.min_speed; } else { return 0; } } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { struct i2c_adapter *adapter; struct device *dev; dev = bus_find_device(&i2c_bus_type, NULL, handle, device_match_acpi_handle); if (!dev) return NULL; adapter = i2c_verify_adapter(dev); if (!adapter) put_device(dev); return adapter; } EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle); static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev)); } static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev) { return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev)); } static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, void *arg) { struct acpi_device *adev = arg; struct i2c_board_info info; acpi_handle adapter_handle; struct i2c_adapter *adapter; struct i2c_client *client; switch (value) { case ACPI_RECONFIG_DEVICE_ADD: if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle)) break; adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); if (!adapter) break; i2c_acpi_register_device(adapter, adev, &info); put_device(&adapter->dev); break; case ACPI_RECONFIG_DEVICE_REMOVE: if (!acpi_device_enumerated(adev)) break; client = i2c_acpi_find_client_by_adev(adev); if (client) { i2c_unregister_device(client); put_device(&client->dev); } adapter = i2c_acpi_find_adapter_by_adev(adev); if (adapter) { acpi_unbind_one(&adapter->dev); put_device(&adapter->dev); } break; } return NOTIFY_OK; } struct notifier_block i2c_acpi_notifier = { .notifier_call = i2c_acpi_notify, }; /** * i2c_acpi_new_device_by_fwnode - Create i2c-client for the Nth 
I2cSerialBus resource * @fwnode: fwnode with the ACPI resources to get the client from * @index: Index of ACPI resource to get * @info: describes the I2C device; note this is modified (addr gets set) * Context: can sleep * * By default the i2c subsys creates an i2c-client for the first I2cSerialBus * resource of an acpi_device, but some acpi_devices have multiple I2cSerialBus * resources, in that case this function can be used to create an i2c-client * for other I2cSerialBus resources in the Current Resource Settings table. * * Also see i2c_new_client_device, which this function calls to create the * i2c-client. * * Returns a pointer to the new i2c-client, or error pointer in case of failure. * Specifically, -EPROBE_DEFER is returned if the adapter is not found. */ struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode, int index, struct i2c_board_info *info) { struct i2c_acpi_lookup lookup; struct i2c_adapter *adapter; struct acpi_device *adev; LIST_HEAD(resource_list); int ret; adev = to_acpi_device_node(fwnode); if (!adev) return ERR_PTR(-ENODEV); memset(&lookup, 0, sizeof(lookup)); lookup.info = info; lookup.device_handle = acpi_device_handle(adev); lookup.index = index; ret = acpi_dev_get_resources(adev, &resource_list, i2c_acpi_fill_info, &lookup); if (ret < 0) return ERR_PTR(ret); acpi_dev_free_resource_list(&resource_list); if (!info->addr) return ERR_PTR(-EADDRNOTAVAIL); adapter = i2c_acpi_find_adapter_by_handle(lookup.adapter_handle); if (!adapter) return ERR_PTR(-EPROBE_DEFER); return i2c_new_client_device(adapter, info); } EXPORT_SYMBOL_GPL(i2c_acpi_new_device_by_fwnode); bool i2c_acpi_waive_d0_probe(struct device *dev) { struct i2c_driver *driver = to_i2c_driver(dev->driver); struct acpi_device *adev = ACPI_COMPANION(dev); return driver->flags & I2C_DRV_ACPI_WAIVE_D0_PROBE && adev && adev->power.state_for_enumeration >= adev->power.state; } EXPORT_SYMBOL_GPL(i2c_acpi_waive_d0_probe); #ifdef CONFIG_ACPI_I2C_OPREGION static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, u8 cmd, u8 *data, u8 data_len) { struct i2c_msg msgs[2]; int ret; u8 *buffer; buffer = kzalloc(data_len, GFP_KERNEL); if (!buffer) return AE_NO_MEMORY; msgs[0].addr = client->addr; msgs[0].flags = client->flags; msgs[0].len = 1; msgs[0].buf = &cmd; msgs[1].addr = client->addr; msgs[1].flags = client->flags | I2C_M_RD; msgs[1].len = data_len; msgs[1].buf = buffer; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) { /* Getting a NACK is unfortunately normal with some DSTDs */ if (ret == -EREMOTEIO) dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", data_len, client->addr, cmd, ret); else dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", data_len, client->addr, cmd, ret); /* 2 transfers must have completed successfully */ } else if (ret == 2) { memcpy(data, buffer, data_len); ret = 0; } else { ret = -EIO; } kfree(buffer); return ret; } static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, u8 cmd, u8 *data, u8 data_len) { struct i2c_msg msgs[1]; u8 *buffer; int ret = AE_OK; buffer = kzalloc(data_len + 1, GFP_KERNEL); if (!buffer) return AE_NO_MEMORY; buffer[0] = cmd; memcpy(buffer + 1, data, data_len); msgs[0].addr = client->addr; msgs[0].flags = client->flags; msgs[0].len = data_len + 1; msgs[0].buf = buffer; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); kfree(buffer); if (ret < 0) { dev_err(&client->adapter->dev, "i2c write 
failed: %d\n", ret); return ret; } /* 1 transfer must have completed successfully */ return (ret == 1) ? 0 : -EIO; } static acpi_status i2c_acpi_space_handler(u32 function, acpi_physical_address command, u32 bits, u64 *value64, void *handler_context, void *region_context) { struct gsb_buffer *gsb = (struct gsb_buffer *)value64; struct i2c_acpi_handler_data *data = handler_context; struct acpi_connection_info *info = &data->info; struct acpi_resource_i2c_serialbus *sb; struct i2c_adapter *adapter = data->adapter; struct i2c_client *client; struct acpi_resource *ares; u32 accessor_type = function >> 16; u8 action = function & ACPI_IO_MASK; acpi_status ret; int status; ret = acpi_buffer_to_resource(info->connection, info->length, &ares); if (ACPI_FAILURE(ret)) return ret; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { ret = AE_NO_MEMORY; goto err; } if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) { ret = AE_BAD_PARAMETER; goto err; } client->adapter = adapter; client->addr = sb->slave_address; if (sb->access_mode == ACPI_I2C_10BIT_MODE) client->flags |= I2C_CLIENT_TEN; switch (accessor_type) { case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: if (action == ACPI_READ) { status = i2c_smbus_read_byte(client); if (status >= 0) { gsb->bdata = status; status = 0; } } else { status = i2c_smbus_write_byte(client, gsb->bdata); } break; case ACPI_GSB_ACCESS_ATTRIB_BYTE: if (action == ACPI_READ) { status = i2c_smbus_read_byte_data(client, command); if (status >= 0) { gsb->bdata = status; status = 0; } } else { status = i2c_smbus_write_byte_data(client, command, gsb->bdata); } break; case ACPI_GSB_ACCESS_ATTRIB_WORD: if (action == ACPI_READ) { status = i2c_smbus_read_word_data(client, command); if (status >= 0) { gsb->wdata = status; status = 0; } } else { status = i2c_smbus_write_word_data(client, command, gsb->wdata); } break; case ACPI_GSB_ACCESS_ATTRIB_BLOCK: if (action == ACPI_READ) { status = i2c_smbus_read_block_data(client, command, gsb->data); if (status >= 0) { gsb->len = status; status = 0; } } else { status = i2c_smbus_write_block_data(client, command, gsb->len, gsb->data); } break; case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: if (action == ACPI_READ) { status = acpi_gsb_i2c_read_bytes(client, command, gsb->data, info->access_length); } else { status = acpi_gsb_i2c_write_bytes(client, command, gsb->data, info->access_length); } break; default: dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", accessor_type, client->addr); ret = AE_BAD_PARAMETER; goto err; } gsb->status = status; err: kfree(client); ACPI_FREE(ares); return ret; } int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) { acpi_handle handle; struct i2c_acpi_handler_data *data; acpi_status status; if (!adapter->dev.parent) return -ENODEV; handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return -ENODEV; data = kzalloc(sizeof(struct i2c_acpi_handler_data), GFP_KERNEL); if (!data) return -ENOMEM; data->adapter = adapter; status = acpi_bus_attach_private_data(handle, (void *)data); if (ACPI_FAILURE(status)) { kfree(data); return -ENOMEM; } status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GSBUS, &i2c_acpi_space_handler, NULL, data); if (ACPI_FAILURE(status)) { dev_err(&adapter->dev, "Error installing i2c space handler\n"); acpi_bus_detach_private_data(handle); kfree(data); return -ENOMEM; } return 0; } void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) { acpi_handle handle; struct i2c_acpi_handler_data *data; acpi_status status; if (!adapter->dev.parent) 
return; handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return; acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GSBUS, &i2c_acpi_space_handler); status = acpi_bus_get_private_data(handle, (void **)&data); if (ACPI_SUCCESS(status)) kfree(data); acpi_bus_detach_private_data(handle); } #endif /* CONFIG_ACPI_I2C_OPREGION */
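As an illustrative sketch only (not part of i2c-core-acpi.c above): a driver bound to the first I2cSerialBus resource of an ACPI device that declares several such resources could use i2c_acpi_new_device_by_fwnode() roughly as below to instantiate a client for the second resource. The function name, the "example-aux" device name and the resource index are made-up for the example.

/*
 * Hypothetical probe fragment: instantiate an extra client for the second
 * I2cSerialBus entry in the device's _CRS (index 1; index 0 is the client
 * the I2C core already created during enumeration).
 */
static int example_attach_second_client(struct i2c_client *first)
{
	struct i2c_board_info info = {
		.type = "example-aux",		/* made-up device name */
	};
	struct i2c_client *second;

	second = i2c_acpi_new_device_by_fwnode(dev_fwnode(&first->dev), 1, &info);
	if (IS_ERR(second))
		return PTR_ERR(second);		/* may be -EPROBE_DEFER */

	/* caller must i2c_unregister_device(second) on teardown */
	i2c_set_clientdata(first, second);
	return 0;
}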
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NAMEI_H #define _LINUX_NAMEI_H #include <linux/fs.h> #include <linux/kernel.h> #include <linux/path.h> #include <linux/fcntl.h> #include <linux/errno.h> #include <linux/fs_struct.h> enum { MAX_NESTED_LINKS = 8 }; #define MAXSYMLINKS 40 /* * Type of the last component on LOOKUP_PARENT */ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; /* pathwalk mode */ #define LOOKUP_FOLLOW BIT(0) /* follow links at the end */ #define LOOKUP_DIRECTORY BIT(1) /* require a directory */ #define LOOKUP_AUTOMOUNT BIT(2) /* force terminal automount */ #define LOOKUP_EMPTY BIT(3) /* accept empty path [user_... only] */ #define LOOKUP_LINKAT_EMPTY BIT(4) /* Linkat request with empty path. */ #define LOOKUP_DOWN BIT(5) /* follow mounts in the starting point */ #define LOOKUP_MOUNTPOINT BIT(6) /* follow mounts in the end */ #define LOOKUP_REVAL BIT(7) /* tell ->d_revalidate() to trust no cache */ #define LOOKUP_RCU BIT(8) /* RCU pathwalk mode; semi-internal */ #define LOOKUP_CACHED BIT(9) /* Only do cached lookup */ #define LOOKUP_PARENT BIT(10) /* Looking up final parent in path */ /* 5 spare bits for pathwalk */ /* These tell filesystem methods that we are dealing with the final component... */ #define LOOKUP_OPEN BIT(16) /* ... in open */ #define LOOKUP_CREATE BIT(17) /* ... in object creation */ #define LOOKUP_EXCL BIT(18) /* ... in target must not exist */ #define LOOKUP_RENAME_TARGET BIT(19) /* ... in destination of rename() */ /* 4 spare bits for intent */ /* Scoping flags for lookup. */ #define LOOKUP_NO_SYMLINKS BIT(24) /* No symlink crossing. */ #define LOOKUP_NO_MAGICLINKS BIT(25) /* No nd_jump_link() crossing. */ #define LOOKUP_NO_XDEV BIT(26) /* No mountpoint crossing. */ #define LOOKUP_BENEATH BIT(27) /* No escaping from starting point. */ #define LOOKUP_IN_ROOT BIT(28) /* Treat dirfd as fs root. */ /* LOOKUP_* flags which do scope-related checks based on the dirfd.
*/ #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) /* 3 spare bits for scoping */ extern int path_pts(struct path *path); extern int user_path_at(int, const char __user *, unsigned, struct path *); struct dentry *lookup_one_qstr_excl(const struct qstr *name, struct dentry *base, unsigned int flags); extern int kern_path(const char *, unsigned, struct path *); struct dentry *kern_path_parent(const char *name, struct path *parent); extern struct dentry *start_creating_path(int, const char *, struct path *, unsigned int); extern struct dentry *start_creating_user_path(int, const char __user *, struct path *, unsigned int); extern void end_creating_path(const struct path *, struct dentry *); extern struct dentry *start_removing_path(const char *, struct path *); extern struct dentry *start_removing_user_path_at(int , const char __user *, struct path *); static inline void end_removing_path(const struct path *path , struct dentry *dentry) { end_creating_path(path, dentry); } int vfs_path_parent_lookup(struct filename *filename, unsigned int flags, struct path *parent, struct qstr *last, int *type, const struct path *root); int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); extern struct dentry *try_lookup_noperm(struct qstr *, struct dentry *); extern struct dentry *lookup_noperm(struct qstr *, struct dentry *); extern struct dentry *lookup_noperm_unlocked(struct qstr *, struct dentry *); extern struct dentry *lookup_noperm_positive_unlocked(struct qstr *, struct dentry *); struct dentry *lookup_one(struct mnt_idmap *, struct qstr *, struct dentry *); struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, struct qstr *name, struct dentry *base); struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap, struct qstr *name, struct dentry *base); struct dentry *lookup_one_positive_killable(struct mnt_idmap *idmap, struct qstr *name, struct dentry *base); struct dentry *start_creating(struct mnt_idmap *idmap, struct dentry *parent, struct qstr *name); struct dentry *start_removing(struct mnt_idmap *idmap, struct dentry *parent, struct qstr *name); struct dentry *start_creating_killable(struct mnt_idmap *idmap, struct dentry *parent, struct qstr *name); struct dentry *start_removing_killable(struct mnt_idmap *idmap, struct dentry *parent, struct qstr *name); struct dentry *start_creating_noperm(struct dentry *parent, struct qstr *name); struct dentry *start_removing_noperm(struct dentry *parent, struct qstr *name); struct dentry *start_creating_dentry(struct dentry *parent, struct dentry *child); struct dentry *start_removing_dentry(struct dentry *parent, struct dentry *child); /* end_creating - finish action started with start_creating * @child: dentry returned by start_creating() or vfs_mkdir() * * Unlock and release the child. This can be called after * start_creating() whether that function succeeded or not, * but it is not needed on failure. * * If vfs_mkdir() was called then the value returned from that function * should be given for @child rather than the original dentry, as vfs_mkdir() * may have provided a new dentry. * * * If vfs_mkdir() was not called, then @child will be a valid dentry and * @parent will be ignored. */ static inline void end_creating(struct dentry *child) { end_dirop(child); } /* end_creating_keep - finish action started with start_creating() and return result * @child: dentry returned by start_creating() or vfs_mkdir() * * Unlock and return the child. 
This can be called after * start_creating() whether that function succeeded or not, * but it is not needed on failure. * * If vfs_mkdir() was called then the value returned from that function * should be given for @child rather than the original dentry, as vfs_mkdir() * may have provided a new dentry. * * Returns: @child, which may be a dentry or an error. * */ static inline struct dentry *end_creating_keep(struct dentry *child) { if (!IS_ERR(child)) dget(child); end_dirop(child); return child; } /** * end_removing - finish action started with start_removing * @child: dentry returned by start_removing() * @parent: dentry given to start_removing() * * Unlock and release the child. * * This is identical to end_dirop(). It can be passed the result of * start_removing() whether that was successful or not, but it not needed * if start_removing() failed. */ static inline void end_removing(struct dentry *child) { end_dirop(child); } extern int follow_down_one(struct path *); extern int follow_down(struct path *path, unsigned int flags); extern int follow_up(struct path *); extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern struct dentry *lock_rename_child(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); int start_renaming(struct renamedata *rd, int lookup_flags, struct qstr *old_last, struct qstr *new_last); int start_renaming_dentry(struct renamedata *rd, int lookup_flags, struct dentry *old_dentry, struct qstr *new_last); int start_renaming_two_dentries(struct renamedata *rd, struct dentry *old_dentry, struct dentry *new_dentry); void end_renaming(struct renamedata *rd); /** * mode_strip_umask - handle vfs umask stripping * @dir: parent directory of the new inode * @mode: mode of the new inode to be created in @dir * * In most filesystems, umask stripping depends on whether or not the * filesystem supports POSIX ACLs. If the filesystem doesn't support it umask * stripping is done directly in here. If the filesystem does support POSIX * ACLs umask stripping is deferred until the filesystem calls * posix_acl_create(). * * Some filesystems (like NFSv4) also want to avoid umask stripping by the * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK * to get this effect without declaring that they support POSIX ACLs. * * Returns: mode */ static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode) { if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK)) mode &= ~current_umask(); return mode; } extern int __must_check nd_jump_link(const struct path *path); static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) { ((char *) name)[min(len, maxlen)] = '\0'; } /** * retry_estale - determine whether the caller should retry an operation * @error: the error that would currently be returned * @flags: flags being used for next lookup attempt * * Check to see if the error code was -ESTALE, and then determine whether * to retry the call based on whether "flags" already has LOOKUP_REVAL set. * * Returns true if the caller should try the operation again. */ static inline bool retry_estale(const long error, const unsigned int flags) { return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL)); } #endif /* _LINUX_NAMEI_H */ |
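As an illustrative sketch only (not part of namei.h above): retry_estale() is intended to be used by path-walking callers in a retry loop, where a second attempt adds LOOKUP_REVAL so the dcache is not trusted. The function name below is hypothetical.

/*
 * Hypothetical caller: look up a user-supplied path, retrying once with
 * LOOKUP_REVAL if the cached walk came back with -ESTALE (e.g. on NFS).
 */
static int example_user_lookup(const char __user *pathname, struct path *path)
{
	unsigned int lookup_flags = LOOKUP_FOLLOW;
	int error;

retry:
	error = user_path_at(AT_FDCWD, pathname, lookup_flags, path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;	/* don't trust the dcache this time */
		goto retry;
	}
	return error;	/* on success the caller owns a reference in *path */
}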
// SPDX-License-Identifier: GPL-2.0 /* * USB Attached SCSI * Note that this is not the same as the USB Mass Storage driver * * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc.
2013 - 2016 * Copyright Matthew Wilcox for Intel Corp, 2010 * Copyright Sarah Sharp for Intel Corp, 2010 */ #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb_usual.h> #include <linux/usb/hcd.h> #include <linux/usb/storage.h> #include <linux/usb/uas.h> #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_devinfo.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "uas-detect.h" #include "scsiglue.h" #define MAX_CMNDS 256 struct uas_dev_info { struct usb_interface *intf; struct usb_device *udev; struct usb_anchor cmd_urbs; struct usb_anchor sense_urbs; struct usb_anchor data_urbs; u64 flags; int qdepth, resetting; unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe; unsigned use_streams:1; unsigned shutdown:1; struct scsi_cmnd *cmnd[MAX_CMNDS]; spinlock_t lock; struct work_struct work; struct work_struct scan_work; /* for async scanning */ }; enum { SUBMIT_STATUS_URB = BIT(1), ALLOC_DATA_IN_URB = BIT(2), SUBMIT_DATA_IN_URB = BIT(3), ALLOC_DATA_OUT_URB = BIT(4), SUBMIT_DATA_OUT_URB = BIT(5), ALLOC_CMD_URB = BIT(6), SUBMIT_CMD_URB = BIT(7), COMMAND_INFLIGHT = BIT(8), DATA_IN_URB_INFLIGHT = BIT(9), DATA_OUT_URB_INFLIGHT = BIT(10), COMMAND_ABORTED = BIT(11), IS_IN_WORK_LIST = BIT(12), }; /* Overrides scsi_pointer */ struct uas_cmd_info { unsigned int state; unsigned int uas_tag; struct urb *cmd_urb; struct urb *data_in_urb; struct urb *data_out_urb; }; /* I hate forward declarations, but I actually have a loop */ static int uas_submit_urbs(struct scsi_cmnd *cmnd, struct uas_dev_info *devinfo); static void uas_do_work(struct work_struct *work); static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller); static void uas_free_streams(struct uas_dev_info *devinfo); static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, int status); /* * This driver needs its own workqueue, as we need to control memory allocation. * * In the course of error handling and power management uas_wait_for_pending_cmnds() * needs to flush pending work items. In these contexts we cannot allocate memory * by doing block IO as we would deadlock. For the same reason we cannot wait * for anything allocating memory not heeding these constraints. * * So we have to control all work items that can be on the workqueue we flush. * Hence we cannot share a queue and need our own. 
*/ static struct workqueue_struct *workqueue; static void uas_do_work(struct work_struct *work) { struct uas_dev_info *devinfo = container_of(work, struct uas_dev_info, work); struct uas_cmd_info *cmdinfo; struct scsi_cmnd *cmnd; unsigned long flags; int i, err; spin_lock_irqsave(&devinfo->lock, flags); if (devinfo->resetting) goto out; for (i = 0; i < devinfo->qdepth; i++) { if (!devinfo->cmnd[i]) continue; cmnd = devinfo->cmnd[i]; cmdinfo = scsi_cmd_priv(cmnd); if (!(cmdinfo->state & IS_IN_WORK_LIST)) continue; err = uas_submit_urbs(cmnd, cmnd->device->hostdata); if (!err) cmdinfo->state &= ~IS_IN_WORK_LIST; else queue_work(workqueue, &devinfo->work); } out: spin_unlock_irqrestore(&devinfo->lock, flags); } static void uas_scan_work(struct work_struct *work) { struct uas_dev_info *devinfo = container_of(work, struct uas_dev_info, scan_work); struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf); dev_dbg(&devinfo->intf->dev, "starting scan\n"); scsi_scan_host(shost); dev_dbg(&devinfo->intf->dev, "scan complete\n"); } static void uas_add_work(struct scsi_cmnd *cmnd) { struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct uas_dev_info *devinfo = cmnd->device->hostdata; lockdep_assert_held(&devinfo->lock); cmdinfo->state |= IS_IN_WORK_LIST; queue_work(workqueue, &devinfo->work); } static void uas_zap_pending(struct uas_dev_info *devinfo, int result) { struct uas_cmd_info *cmdinfo; struct scsi_cmnd *cmnd; unsigned long flags; int i, err; spin_lock_irqsave(&devinfo->lock, flags); for (i = 0; i < devinfo->qdepth; i++) { if (!devinfo->cmnd[i]) continue; cmnd = devinfo->cmnd[i]; cmdinfo = scsi_cmd_priv(cmnd); uas_log_cmd_state(cmnd, __func__, 0); /* Sense urbs were killed, clear COMMAND_INFLIGHT manually */ cmdinfo->state &= ~COMMAND_INFLIGHT; cmnd->result = result << 16; err = uas_try_complete(cmnd, __func__); WARN_ON(err != 0); } spin_unlock_irqrestore(&devinfo->lock, flags); } static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd) { struct sense_iu *sense_iu = urb->transfer_buffer; struct scsi_device *sdev = cmnd->device; if (urb->actual_length > 16) { unsigned len = be16_to_cpup(&sense_iu->len); if (len + 16 != urb->actual_length) { int newlen = min(len + 16, urb->actual_length) - 16; if (newlen < 0) newlen = 0; sdev_printk(KERN_INFO, sdev, "%s: urb length %d " "disagrees with IU sense data length %d, " "using %d bytes of sense data\n", __func__, urb->actual_length, len, newlen); len = newlen; } memcpy(cmnd->sense_buffer, sense_iu->sense, len); } cmnd->result = sense_iu->status; } static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, int status) { struct uas_cmd_info *ci = scsi_cmd_priv(cmnd); if (status == -ENODEV) /* too late */ return; scmd_printk(KERN_INFO, cmnd, "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ", prefix, status, ci->uas_tag, (ci->state & SUBMIT_STATUS_URB) ? " s-st" : "", (ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "", (ci->state & SUBMIT_DATA_IN_URB) ? " s-in" : "", (ci->state & ALLOC_DATA_OUT_URB) ? " a-out" : "", (ci->state & SUBMIT_DATA_OUT_URB) ? " s-out" : "", (ci->state & ALLOC_CMD_URB) ? " a-cmd" : "", (ci->state & SUBMIT_CMD_URB) ? " s-cmd" : "", (ci->state & COMMAND_INFLIGHT) ? " CMD" : "", (ci->state & DATA_IN_URB_INFLIGHT) ? " IN" : "", (ci->state & DATA_OUT_URB_INFLIGHT) ? " OUT" : "", (ci->state & COMMAND_ABORTED) ? " abort" : "", (ci->state & IS_IN_WORK_LIST) ? 
" work" : ""); scsi_print_command(cmnd); } static void uas_free_unsubmitted_urbs(struct scsi_cmnd *cmnd) { struct uas_cmd_info *cmdinfo; if (!cmnd) return; cmdinfo = scsi_cmd_priv(cmnd); if (cmdinfo->state & SUBMIT_CMD_URB) usb_free_urb(cmdinfo->cmd_urb); /* data urbs may have never gotten their submit flag set */ if (!(cmdinfo->state & DATA_IN_URB_INFLIGHT)) usb_free_urb(cmdinfo->data_in_urb); if (!(cmdinfo->state & DATA_OUT_URB_INFLIGHT)) usb_free_urb(cmdinfo->data_out_urb); } static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller) { struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata; lockdep_assert_held(&devinfo->lock); if (cmdinfo->state & (COMMAND_INFLIGHT | DATA_IN_URB_INFLIGHT | DATA_OUT_URB_INFLIGHT | COMMAND_ABORTED)) return -EBUSY; devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL; uas_free_unsubmitted_urbs(cmnd); scsi_done(cmnd); return 0; } static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd, unsigned direction) { struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); int err; cmdinfo->state |= direction | SUBMIT_STATUS_URB; err = uas_submit_urbs(cmnd, cmnd->device->hostdata); if (err) { uas_add_work(cmnd); } } static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *cmnd) { u8 response_code = riu->response_code; switch (response_code) { case RC_INCORRECT_LUN: set_host_byte(cmnd, DID_BAD_TARGET); break; case RC_TMF_SUCCEEDED: set_host_byte(cmnd, DID_OK); break; case RC_TMF_NOT_SUPPORTED: set_host_byte(cmnd, DID_BAD_TARGET); break; default: uas_log_cmd_state(cmnd, "response iu", response_code); set_host_byte(cmnd, DID_ERROR); break; } return response_code == RC_TMF_SUCCEEDED; } static void uas_stat_cmplt(struct urb *urb) { struct iu *iu = urb->transfer_buffer; struct Scsi_Host *shost = urb->context; struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; struct urb *data_in_urb = NULL; struct urb *data_out_urb = NULL; struct scsi_cmnd *cmnd; struct uas_cmd_info *cmdinfo; unsigned long flags; unsigned int idx; int status = urb->status; bool success; if (status) { if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN) dev_err(&urb->dev->dev, "stat urb: status %d\n", status); goto bail; } idx = be16_to_cpup(&iu->tag) - 1; spin_lock_irqsave(&devinfo->lock, flags); if (devinfo->resetting) goto out; if (idx >= MAX_CMNDS || !devinfo->cmnd[idx]) { dev_err(&urb->dev->dev, "stat urb: no pending cmd for uas-tag %d\n", idx + 1); goto out; } cmnd = devinfo->cmnd[idx]; cmdinfo = scsi_cmd_priv(cmnd); if (!(cmdinfo->state & COMMAND_INFLIGHT)) { uas_log_cmd_state(cmnd, "unexpected status cmplt", 0); goto out; } switch (iu->iu_id) { case IU_ID_STATUS: uas_sense(urb, cmnd); if (cmnd->result != 0) { /* cancel data transfers on error */ data_in_urb = usb_get_urb(cmdinfo->data_in_urb); data_out_urb = usb_get_urb(cmdinfo->data_out_urb); } cmdinfo->state &= ~COMMAND_INFLIGHT; uas_try_complete(cmnd, __func__); break; case IU_ID_READ_READY: if (!cmdinfo->data_in_urb || (cmdinfo->state & DATA_IN_URB_INFLIGHT)) { uas_log_cmd_state(cmnd, "unexpected read rdy", 0); break; } uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB); break; case IU_ID_WRITE_READY: if (!cmdinfo->data_out_urb || (cmdinfo->state & DATA_OUT_URB_INFLIGHT)) { uas_log_cmd_state(cmnd, "unexpected write rdy", 0); break; } uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB); break; case IU_ID_RESPONSE: cmdinfo->state &= ~COMMAND_INFLIGHT; success = uas_evaluate_response_iu((struct response_iu *)iu, cmnd); if (!success) 
{ /* Error, cancel data transfers */ data_in_urb = usb_get_urb(cmdinfo->data_in_urb); data_out_urb = usb_get_urb(cmdinfo->data_out_urb); } uas_try_complete(cmnd, __func__); break; default: uas_log_cmd_state(cmnd, "bogus IU", iu->iu_id); } spin_unlock_irqrestore(&devinfo->lock, flags); usb_free_urb(urb); /* Unlinking of data urbs must be done without holding the lock */ if (data_in_urb) { usb_unlink_urb(data_in_urb); usb_put_urb(data_in_urb); } if (data_out_urb) { usb_unlink_urb(data_out_urb); usb_put_urb(data_out_urb); } return; out: spin_unlock_irqrestore(&devinfo->lock, flags); bail: usb_free_urb(urb); } static void uas_data_cmplt(struct urb *urb) { struct scsi_cmnd *cmnd = urb->context; struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata; struct scsi_data_buffer *sdb = &cmnd->sdb; unsigned long flags; int status = urb->status; spin_lock_irqsave(&devinfo->lock, flags); if (cmdinfo->data_in_urb == urb) { cmdinfo->state &= ~DATA_IN_URB_INFLIGHT; cmdinfo->data_in_urb = NULL; } else if (cmdinfo->data_out_urb == urb) { cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT; cmdinfo->data_out_urb = NULL; } if (devinfo->resetting) goto out; /* Data urbs should not complete before the cmd urb is submitted */ if (cmdinfo->state & SUBMIT_CMD_URB) { uas_log_cmd_state(cmnd, "unexpected data cmplt", 0); goto out; } if (status) { if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN) uas_log_cmd_state(cmnd, "data cmplt err", status); /* error: no data transfered */ scsi_set_resid(cmnd, sdb->length); set_host_byte(cmnd, DID_ERROR); } else { scsi_set_resid(cmnd, sdb->length - urb->actual_length); } uas_try_complete(cmnd, __func__); out: spin_unlock_irqrestore(&devinfo->lock, flags); usb_free_urb(urb); } static void uas_cmd_cmplt(struct urb *urb) { if (urb->status) dev_err(&urb->dev->dev, "cmd cmplt err %d\n", urb->status); usb_free_urb(urb); } static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp, struct scsi_cmnd *cmnd, enum dma_data_direction dir) { struct usb_device *udev = devinfo->udev; struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct urb *urb = usb_alloc_urb(0, gfp); struct scsi_data_buffer *sdb = &cmnd->sdb; unsigned int pipe = (dir == DMA_FROM_DEVICE) ? devinfo->data_in_pipe : devinfo->data_out_pipe; if (!urb) goto out; usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length, uas_data_cmplt, cmnd); if (devinfo->use_streams) urb->stream_id = cmdinfo->uas_tag; urb->num_sgs = udev->bus->sg_tablesize ? 
sdb->table.nents : 0; urb->sg = sdb->table.sgl; out: return urb; } static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp, struct scsi_cmnd *cmnd) { struct usb_device *udev = devinfo->udev; struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct urb *urb = usb_alloc_urb(0, gfp); struct sense_iu *iu; if (!urb) goto out; iu = kzalloc(sizeof(*iu), gfp); if (!iu) goto free; usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu), uas_stat_cmplt, cmnd->device->host); if (devinfo->use_streams) urb->stream_id = cmdinfo->uas_tag; urb->transfer_flags |= URB_FREE_BUFFER; out: return urb; free: usb_free_urb(urb); return NULL; } static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp, struct scsi_cmnd *cmnd) { struct usb_device *udev = devinfo->udev; struct scsi_device *sdev = cmnd->device; struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct urb *urb = usb_alloc_urb(0, gfp); struct command_iu *iu; int len; if (!urb) goto out; len = cmnd->cmd_len - 16; if (len < 0) len = 0; len = ALIGN(len, 4); iu = kzalloc(sizeof(*iu) + len, gfp); if (!iu) goto free; iu->iu_id = IU_ID_COMMAND; iu->tag = cpu_to_be16(cmdinfo->uas_tag); iu->prio_attr = UAS_SIMPLE_TAG; iu->len = len; int_to_scsilun(sdev->lun, &iu->lun); memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len, uas_cmd_cmplt, NULL); urb->transfer_flags |= URB_FREE_BUFFER; out: return urb; free: usb_free_urb(urb); return NULL; } /* * Why should I request the Status IU before sending the Command IU? Spec * says to, but also says the device may receive them in any order. Seems * daft to me. */ static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp) { struct uas_dev_info *devinfo = cmnd->device->hostdata; struct urb *urb; int err; urb = uas_alloc_sense_urb(devinfo, gfp, cmnd); if (!urb) return -ENOMEM; usb_anchor_urb(urb, &devinfo->sense_urbs); err = usb_submit_urb(urb, gfp); if (err) { usb_unanchor_urb(urb); uas_log_cmd_state(cmnd, "sense submit err", err); usb_free_urb(urb); } return err; } static int uas_submit_urbs(struct scsi_cmnd *cmnd, struct uas_dev_info *devinfo) { struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); int err; lockdep_assert_held(&devinfo->lock); if (cmdinfo->state & SUBMIT_STATUS_URB) { err = uas_submit_sense_urb(cmnd, GFP_ATOMIC); if (err) return err; cmdinfo->state &= ~SUBMIT_STATUS_URB; } if (cmdinfo->state & ALLOC_DATA_IN_URB) { cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC, cmnd, DMA_FROM_DEVICE); if (!cmdinfo->data_in_urb) return -ENOMEM; cmdinfo->state &= ~ALLOC_DATA_IN_URB; } if (cmdinfo->state & SUBMIT_DATA_IN_URB) { usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs); err = usb_submit_urb(cmdinfo->data_in_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(cmdinfo->data_in_urb); uas_log_cmd_state(cmnd, "data in submit err", err); return err; } cmdinfo->state &= ~SUBMIT_DATA_IN_URB; cmdinfo->state |= DATA_IN_URB_INFLIGHT; } if (cmdinfo->state & ALLOC_DATA_OUT_URB) { cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC, cmnd, DMA_TO_DEVICE); if (!cmdinfo->data_out_urb) return -ENOMEM; cmdinfo->state &= ~ALLOC_DATA_OUT_URB; } if (cmdinfo->state & SUBMIT_DATA_OUT_URB) { usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs); err = usb_submit_urb(cmdinfo->data_out_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(cmdinfo->data_out_urb); uas_log_cmd_state(cmnd, "data out submit err", err); return err; } cmdinfo->state &= ~SUBMIT_DATA_OUT_URB; cmdinfo->state |= 
DATA_OUT_URB_INFLIGHT; } if (cmdinfo->state & ALLOC_CMD_URB) { cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd); if (!cmdinfo->cmd_urb) return -ENOMEM; cmdinfo->state &= ~ALLOC_CMD_URB; } if (cmdinfo->state & SUBMIT_CMD_URB) { usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs); err = usb_submit_urb(cmdinfo->cmd_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(cmdinfo->cmd_urb); uas_log_cmd_state(cmnd, "cmd submit err", err); return err; } cmdinfo->cmd_urb = NULL; cmdinfo->state &= ~SUBMIT_CMD_URB; cmdinfo->state |= COMMAND_INFLIGHT; } return 0; } static int uas_queuecommand_lck(struct scsi_cmnd *cmnd) { struct scsi_device *sdev = cmnd->device; struct uas_dev_info *devinfo = sdev->hostdata; struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); unsigned long flags; int idx, err; /* Re-check scsi_block_requests now that we've the host-lock */ if (cmnd->device->host->host_self_blocked) return SCSI_MLQUEUE_DEVICE_BUSY; if ((devinfo->flags & US_FL_NO_ATA_1X) && (cmnd->cmnd[0] == ATA_12 || cmnd->cmnd[0] == ATA_16)) { memcpy(cmnd->sense_buffer, usb_stor_sense_invalidCDB, sizeof(usb_stor_sense_invalidCDB)); cmnd->result = SAM_STAT_CHECK_CONDITION; scsi_done(cmnd); return 0; } spin_lock_irqsave(&devinfo->lock, flags); if (devinfo->resetting) { set_host_byte(cmnd, DID_ERROR); scsi_done(cmnd); goto zombie; } /* Find a free uas-tag */ for (idx = 0; idx < devinfo->qdepth; idx++) { if (!devinfo->cmnd[idx]) break; } if (idx == devinfo->qdepth) { spin_unlock_irqrestore(&devinfo->lock, flags); return SCSI_MLQUEUE_DEVICE_BUSY; } memset(cmdinfo, 0, sizeof(*cmdinfo)); cmdinfo->uas_tag = idx + 1; /* uas-tag == usb-stream-id, so 1 based */ cmdinfo->state = SUBMIT_STATUS_URB | ALLOC_CMD_URB | SUBMIT_CMD_URB; switch (cmnd->sc_data_direction) { case DMA_FROM_DEVICE: cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB; break; case DMA_BIDIRECTIONAL: cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB; fallthrough; case DMA_TO_DEVICE: cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB; break; case DMA_NONE: break; } if (!devinfo->use_streams) cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); err = uas_submit_urbs(cmnd, devinfo); /* * in case of fatal errors the SCSI layer is peculiar * a command that has finished is a success for the purpose * of queueing, no matter how fatal the error */ if (err == -ENODEV) { if (cmdinfo->state & (COMMAND_INFLIGHT | DATA_IN_URB_INFLIGHT | DATA_OUT_URB_INFLIGHT)) goto out; set_host_byte(cmnd, DID_NO_CONNECT); scsi_done(cmnd); goto zombie; } if (err) { /* If we did nothing, give up now */ if (cmdinfo->state & SUBMIT_STATUS_URB) { spin_unlock_irqrestore(&devinfo->lock, flags); return SCSI_MLQUEUE_DEVICE_BUSY; } uas_add_work(cmnd); } out: devinfo->cmnd[idx] = cmnd; zombie: spin_unlock_irqrestore(&devinfo->lock, flags); return 0; } static DEF_SCSI_QCMD(uas_queuecommand) /* * For now we do not support actually sending an abort to the device, so * this eh always fails. Still we must define it to make sure that we've * dropped all references to the cmnd in question once this function exits. 
*/ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) { struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata; struct urb *data_in_urb = NULL; struct urb *data_out_urb = NULL; unsigned long flags; spin_lock_irqsave(&devinfo->lock, flags); uas_log_cmd_state(cmnd, __func__, 0); /* Ensure that try_complete does not call scsi_done */ cmdinfo->state |= COMMAND_ABORTED; /* Drop all refs to this cmnd, kill data urbs to break their ref */ devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL; if (cmdinfo->state & DATA_IN_URB_INFLIGHT) data_in_urb = usb_get_urb(cmdinfo->data_in_urb); if (cmdinfo->state & DATA_OUT_URB_INFLIGHT) data_out_urb = usb_get_urb(cmdinfo->data_out_urb); uas_free_unsubmitted_urbs(cmnd); spin_unlock_irqrestore(&devinfo->lock, flags); if (data_in_urb) { usb_kill_urb(data_in_urb); usb_put_urb(data_in_urb); } if (data_out_urb) { usb_kill_urb(data_out_urb); usb_put_urb(data_out_urb); } return FAILED; } static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd) { struct scsi_device *sdev = cmnd->device; struct uas_dev_info *devinfo = sdev->hostdata; struct usb_device *udev = devinfo->udev; unsigned long flags; int err; err = usb_lock_device_for_reset(udev, devinfo->intf); if (err) { shost_printk(KERN_ERR, sdev->host, "%s FAILED to get lock err %d\n", __func__, err); return FAILED; } shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__); spin_lock_irqsave(&devinfo->lock, flags); devinfo->resetting = 1; spin_unlock_irqrestore(&devinfo->lock, flags); usb_kill_anchored_urbs(&devinfo->cmd_urbs); usb_kill_anchored_urbs(&devinfo->sense_urbs); usb_kill_anchored_urbs(&devinfo->data_urbs); uas_zap_pending(devinfo, DID_RESET); err = usb_reset_device(udev); spin_lock_irqsave(&devinfo->lock, flags); devinfo->resetting = 0; spin_unlock_irqrestore(&devinfo->lock, flags); usb_unlock_device(udev); if (err) { shost_printk(KERN_INFO, sdev->host, "%s FAILED err %d\n", __func__, err); return FAILED; } shost_printk(KERN_INFO, sdev->host, "%s success\n", __func__); return SUCCESS; } static int uas_target_alloc(struct scsi_target *starget) { struct uas_dev_info *devinfo = (struct uas_dev_info *) dev_to_shost(starget->dev.parent)->hostdata; if (devinfo->flags & US_FL_NO_REPORT_LUNS) starget->no_report_luns = 1; return 0; } static int uas_sdev_init(struct scsi_device *sdev) { struct uas_dev_info *devinfo = (struct uas_dev_info *)sdev->host->hostdata; /* * Some USB storage devices reset if the IO advice hints grouping mode * page is queried. Hence skip that mode page. 
*/ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; sdev->hostdata = devinfo; return 0; } static int uas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct uas_dev_info *devinfo = sdev->hostdata; if (devinfo->flags & US_FL_MAX_SECTORS_64) lim->max_hw_sectors = 64; else if (devinfo->flags & US_FL_MAX_SECTORS_240) lim->max_hw_sectors = 240; if (devinfo->flags & US_FL_NO_REPORT_OPCODES) sdev->no_report_opcodes = 1; /* A few buggy USB-ATA bridges don't understand FUA */ if (devinfo->flags & US_FL_BROKEN_FUA) sdev->broken_fua = 1; /* UAS also needs to support FL_ALWAYS_SYNC */ if (devinfo->flags & US_FL_ALWAYS_SYNC) { sdev->skip_ms_page_3f = 1; sdev->skip_ms_page_8 = 1; sdev->wce_default_on = 1; } /* Some disks cannot handle READ_CAPACITY_16 */ if (devinfo->flags & US_FL_NO_READ_CAPACITY_16) sdev->no_read_capacity_16 = 1; /* Some disks cannot handle WRITE_SAME */ if (devinfo->flags & US_FL_NO_SAME) sdev->no_write_same = 1; /* * Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. * If this device makes that mistake, tell the sd driver. */ if (devinfo->flags & US_FL_FIX_CAPACITY) sdev->fix_capacity = 1; /* * in some cases we have to guess */ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS) sdev->guess_capacity = 1; /* * Some devices report generic values until the media has been * accessed. Force a READ(10) prior to querying device * characteristics. */ sdev->read_before_ms = 1; /* * Some devices don't like MODE SENSE with page=0x3f, * which is the command used for checking if a device * is write-protected. Now that we tell the sd driver * to do a 192-byte transfer with this command the * majority of devices work fine, but a few still can't * handle it. The sd driver will simply assume those * devices are write-enabled. */ if (devinfo->flags & US_FL_NO_WP_DETECT) sdev->skip_ms_page_3f = 1; scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } static const struct scsi_host_template uas_host_template = { .module = THIS_MODULE, .name = "uas", .queuecommand = uas_queuecommand, .target_alloc = uas_target_alloc, .sdev_init = uas_sdev_init, .sdev_configure = uas_sdev_configure, .eh_abort_handler = uas_eh_abort_handler, .eh_device_reset_handler = uas_eh_device_reset_handler, .this_id = -1, .skip_settle_delay = 1, /* * The protocol has no requirements on alignment in the strict sense. * Controllers may or may not have alignment restrictions. * As this is not exported, we use an extremely conservative guess. 
*/ .dma_alignment = 511, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct uas_cmd_info), }; #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id uas_usb_ids[] = { # include "unusual_uas.h" { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) }, { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) }, { } }; MODULE_DEVICE_TABLE(usb, uas_usb_ids); #undef UNUSUAL_DEV static int uas_switch_interface(struct usb_device *udev, struct usb_interface *intf) { struct usb_host_interface *alt; alt = uas_find_uas_alt_setting(intf); if (!alt) return -ENODEV; return usb_set_interface(udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting); } static int uas_configure_endpoints(struct uas_dev_info *devinfo) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = devinfo->udev; int r; r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps); if (r) return r; devinfo->cmd_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(&eps[0]->desc)); devinfo->status_pipe = usb_rcvbulkpipe(udev, usb_endpoint_num(&eps[1]->desc)); devinfo->data_in_pipe = usb_rcvbulkpipe(udev, usb_endpoint_num(&eps[2]->desc)); devinfo->data_out_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(&eps[3]->desc)); if (udev->speed < USB_SPEED_SUPER) { devinfo->qdepth = 32; devinfo->use_streams = 0; } else { devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 3, MAX_CMNDS, GFP_NOIO); if (devinfo->qdepth < 0) return devinfo->qdepth; devinfo->use_streams = 1; } return 0; } static void uas_free_streams(struct uas_dev_info *devinfo) { struct usb_device *udev = devinfo->udev; struct usb_host_endpoint *eps[3]; eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe); eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe); eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe); usb_free_streams(devinfo->intf, eps, 3, GFP_NOIO); } static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) { int result = -ENOMEM; struct Scsi_Host *shost = NULL; struct uas_dev_info *devinfo; struct usb_device *udev = interface_to_usbdev(intf); u64 dev_flags; if (!uas_use_uas_driver(intf, id, &dev_flags)) return -ENODEV; if (uas_switch_interface(udev, intf)) return -ENODEV; shost = scsi_host_alloc(&uas_host_template, sizeof(struct uas_dev_info)); if (!shost) goto set_alt0; shost->max_cmd_len = 16 + 252; shost->max_id = 1; shost->max_lun = 256; shost->max_channel = 0; shost->sg_tablesize = udev->bus->sg_tablesize; devinfo = (struct uas_dev_info *)shost->hostdata; devinfo->intf = intf; devinfo->udev = udev; devinfo->resetting = 0; devinfo->shutdown = 0; devinfo->flags = dev_flags; init_usb_anchor(&devinfo->cmd_urbs); init_usb_anchor(&devinfo->sense_urbs); init_usb_anchor(&devinfo->data_urbs); spin_lock_init(&devinfo->lock); INIT_WORK(&devinfo->work, uas_do_work); INIT_WORK(&devinfo->scan_work, uas_scan_work); result = uas_configure_endpoints(devinfo); if (result) goto set_alt0; /* * 1 tag is reserved for untagged commands + * 1 tag to avoid off by one errors in some bridge firmwares */ shost->can_queue = devinfo->qdepth - 2; usb_set_intfdata(intf, shost); result = scsi_add_host(shost, &intf->dev); if (result) goto free_streams; /* Submit the delayed_work for SCSI-device scanning */ schedule_work(&devinfo->scan_work); return result; free_streams: 
uas_free_streams(devinfo); usb_set_intfdata(intf, NULL); set_alt0: usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0); if (shost) scsi_host_put(shost); return result; } static int uas_cmnd_list_empty(struct uas_dev_info *devinfo) { unsigned long flags; int i, r = 1; spin_lock_irqsave(&devinfo->lock, flags); for (i = 0; i < devinfo->qdepth; i++) { if (devinfo->cmnd[i]) { r = 0; /* Not empty */ break; } } spin_unlock_irqrestore(&devinfo->lock, flags); return r; } /* * Wait for any pending cmnds to complete, on usb-2 sense_urbs may temporarily * get empty while there still is more work to do due to sense-urbs completing * with a READ/WRITE_READY iu code, so keep waiting until the list gets empty. */ static int uas_wait_for_pending_cmnds(struct uas_dev_info *devinfo) { unsigned long start_time; int r; start_time = jiffies; do { flush_work(&devinfo->work); r = usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000); if (r == 0) return -ETIME; r = usb_wait_anchor_empty_timeout(&devinfo->data_urbs, 500); if (r == 0) return -ETIME; if (time_after(jiffies, start_time + 5 * HZ)) return -ETIME; } while (!uas_cmnd_list_empty(devinfo)); return 0; } static int uas_pre_reset(struct usb_interface *intf) { struct Scsi_Host *shost = usb_get_intfdata(intf); struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; unsigned long flags; if (devinfo->shutdown) return 0; /* Block new requests */ spin_lock_irqsave(shost->host_lock, flags); scsi_block_requests(shost); spin_unlock_irqrestore(shost->host_lock, flags); if (uas_wait_for_pending_cmnds(devinfo) != 0) { shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__); scsi_unblock_requests(shost); return 1; } uas_free_streams(devinfo); return 0; } static int uas_post_reset(struct usb_interface *intf) { struct Scsi_Host *shost = usb_get_intfdata(intf); struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; unsigned long flags; int err; if (devinfo->shutdown) return 0; err = uas_configure_endpoints(devinfo); if (err && err != -ENODEV) shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); /* we must unblock the host in every case lest we deadlock */ spin_lock_irqsave(shost->host_lock, flags); scsi_report_bus_reset(shost, 0); spin_unlock_irqrestore(shost->host_lock, flags); scsi_unblock_requests(shost); return err ? 
1 : 0; } static int uas_suspend(struct usb_interface *intf, pm_message_t message) { struct Scsi_Host *shost = usb_get_intfdata(intf); struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; if (uas_wait_for_pending_cmnds(devinfo) != 0) { shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__); return -ETIME; } return 0; } static int uas_resume(struct usb_interface *intf) { return 0; } static int uas_reset_resume(struct usb_interface *intf) { struct Scsi_Host *shost = usb_get_intfdata(intf); struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; unsigned long flags; int err; err = uas_configure_endpoints(devinfo); if (err) { shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); return -EIO; } spin_lock_irqsave(shost->host_lock, flags); scsi_report_bus_reset(shost, 0); spin_unlock_irqrestore(shost->host_lock, flags); return 0; } static void uas_disconnect(struct usb_interface *intf) { struct Scsi_Host *shost = usb_get_intfdata(intf); struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; unsigned long flags; spin_lock_irqsave(&devinfo->lock, flags); devinfo->resetting = 1; spin_unlock_irqrestore(&devinfo->lock, flags); cancel_work_sync(&devinfo->work); usb_kill_anchored_urbs(&devinfo->cmd_urbs); usb_kill_anchored_urbs(&devinfo->sense_urbs); usb_kill_anchored_urbs(&devinfo->data_urbs); uas_zap_pending(devinfo, DID_NO_CONNECT); /* * Prevent SCSI scanning (if it hasn't started yet) * or wait for the SCSI-scanning routine to stop. */ cancel_work_sync(&devinfo->scan_work); scsi_remove_host(shost); uas_free_streams(devinfo); scsi_host_put(shost); } /* * Put the device back in usb-storage mode on shutdown, as some BIOS-es * hang on reboot when the device is still in uas mode. Note the reset is * necessary as some devices won't revert to usb-storage mode without it. */ static void uas_shutdown(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct Scsi_Host *shost = usb_get_intfdata(intf); struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata; if (system_state != SYSTEM_RESTART) return; devinfo->shutdown = 1; uas_free_streams(devinfo); usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0); usb_reset_device(udev); } static struct usb_driver uas_driver = { .name = "uas", .probe = uas_probe, .disconnect = uas_disconnect, .pre_reset = uas_pre_reset, .post_reset = uas_post_reset, .suspend = uas_suspend, .resume = uas_resume, .reset_resume = uas_reset_resume, .shutdown = uas_shutdown, .id_table = uas_usb_ids, }; static int __init uas_init(void) { int rv; workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM | WQ_PERCPU, 0); if (!workqueue) return -ENOMEM; rv = usb_register(&uas_driver); if (rv) { destroy_workqueue(workqueue); return -ENOMEM; } return 0; } static void __exit uas_exit(void) { usb_deregister(&uas_driver); destroy_workqueue(workqueue); } module_init(uas_init); module_exit(uas_exit); MODULE_DESCRIPTION("USB Attached SCSI driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); MODULE_AUTHOR( "Hans de Goede <hdegoede@redhat.com>, Matthew Wilcox and Sarah Sharp"); |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
*/ #define pr_fmt(fmt) "iio-core: " fmt #include <linux/anon_inodes.h> #include <linux/cdev.h> #include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/idr.h> #include <linux/kdev_t.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/property.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/wordpart.h> #include <linux/iio/buffer.h> #include <linux/iio/buffer_impl.h> #include <linux/iio/events.h> #include <linux/iio/iio-opaque.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include "iio_core.h" #include "iio_core_trigger.h" /* IDA to assign each registered device a unique id */ static DEFINE_IDA(iio_ida); static dev_t iio_devt; #define IIO_DEV_MAX 256 const struct bus_type iio_bus_type = { .name = "iio", }; EXPORT_SYMBOL(iio_bus_type); static struct dentry *iio_debugfs_dentry; static const char * const iio_direction[] = { [0] = "in", [1] = "out", }; static const char * const iio_chan_type_name_spec[] = { [IIO_VOLTAGE] = "voltage", [IIO_CURRENT] = "current", [IIO_POWER] = "power", [IIO_ACCEL] = "accel", [IIO_ANGL_VEL] = "anglvel", [IIO_MAGN] = "magn", [IIO_LIGHT] = "illuminance", [IIO_INTENSITY] = "intensity", [IIO_PROXIMITY] = "proximity", [IIO_TEMP] = "temp", [IIO_INCLI] = "incli", [IIO_ROT] = "rot", [IIO_ANGL] = "angl", [IIO_TIMESTAMP] = "timestamp", [IIO_CAPACITANCE] = "capacitance", [IIO_ALTVOLTAGE] = "altvoltage", [IIO_CCT] = "cct", [IIO_PRESSURE] = "pressure", [IIO_HUMIDITYRELATIVE] = "humidityrelative", [IIO_ACTIVITY] = "activity", [IIO_STEPS] = "steps", [IIO_ENERGY] = "energy", [IIO_DISTANCE] = "distance", [IIO_VELOCITY] = "velocity", [IIO_CONCENTRATION] = "concentration", [IIO_RESISTANCE] = "resistance", [IIO_PH] = "ph", [IIO_UVINDEX] = "uvindex", [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity", [IIO_COUNT] = "count", [IIO_INDEX] = "index", [IIO_GRAVITY] = "gravity", [IIO_POSITIONRELATIVE] = "positionrelative", [IIO_PHASE] = "phase", [IIO_MASSCONCENTRATION] = "massconcentration", [IIO_DELTA_ANGL] = "deltaangl", [IIO_DELTA_VELOCITY] = "deltavelocity", [IIO_COLORTEMP] = "colortemp", [IIO_CHROMATICITY] = "chromaticity", [IIO_ATTENTION] = "attention", [IIO_ALTCURRENT] = "altcurrent", }; static const char * const iio_modifier_names[] = { [IIO_MOD_X] = "x", [IIO_MOD_Y] = "y", [IIO_MOD_Z] = "z", [IIO_MOD_X_AND_Y] = "x&y", [IIO_MOD_X_AND_Z] = "x&z", [IIO_MOD_Y_AND_Z] = "y&z", [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z", [IIO_MOD_X_OR_Y] = "x|y", [IIO_MOD_X_OR_Z] = "x|z", [IIO_MOD_Y_OR_Z] = "y|z", [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z", [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)", [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2", [IIO_MOD_LIGHT_BOTH] = "both", [IIO_MOD_LIGHT_IR] = "ir", [IIO_MOD_LIGHT_CLEAR] = "clear", [IIO_MOD_LIGHT_RED] = "red", [IIO_MOD_LIGHT_GREEN] = "green", [IIO_MOD_LIGHT_BLUE] = "blue", [IIO_MOD_LIGHT_UV] = "uv", [IIO_MOD_LIGHT_UVA] = "uva", [IIO_MOD_LIGHT_UVB] = "uvb", [IIO_MOD_LIGHT_DUV] = "duv", [IIO_MOD_QUATERNION] = "quaternion", [IIO_MOD_TEMP_AMBIENT] = "ambient", [IIO_MOD_TEMP_OBJECT] = "object", [IIO_MOD_NORTH_MAGN] = "from_north_magnetic", [IIO_MOD_NORTH_TRUE] = "from_north_true", [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp", [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp", [IIO_MOD_RUNNING] = "running", [IIO_MOD_JOGGING] = "jogging", [IIO_MOD_WALKING] = "walking", [IIO_MOD_STILL] = "still", [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = 
"sqrt(x^2+y^2+z^2)", [IIO_MOD_I] = "i", [IIO_MOD_Q] = "q", [IIO_MOD_CO2] = "co2", [IIO_MOD_VOC] = "voc", [IIO_MOD_PM1] = "pm1", [IIO_MOD_PM2P5] = "pm2p5", [IIO_MOD_PM4] = "pm4", [IIO_MOD_PM10] = "pm10", [IIO_MOD_ETHANOL] = "ethanol", [IIO_MOD_H2] = "h2", [IIO_MOD_O2] = "o2", [IIO_MOD_LINEAR_X] = "linear_x", [IIO_MOD_LINEAR_Y] = "linear_y", [IIO_MOD_LINEAR_Z] = "linear_z", [IIO_MOD_PITCH] = "pitch", [IIO_MOD_YAW] = "yaw", [IIO_MOD_ROLL] = "roll", [IIO_MOD_RMS] = "rms", [IIO_MOD_ACTIVE] = "active", [IIO_MOD_REACTIVE] = "reactive", [IIO_MOD_APPARENT] = "apparent", }; /* relies on pairs of these shared then separate */ static const char * const iio_chan_info_postfix[] = { [IIO_CHAN_INFO_RAW] = "raw", [IIO_CHAN_INFO_PROCESSED] = "input", [IIO_CHAN_INFO_SCALE] = "scale", [IIO_CHAN_INFO_OFFSET] = "offset", [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", [IIO_CHAN_INFO_PEAK] = "peak_raw", [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] = "filter_low_pass_3db_frequency", [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY] = "filter_high_pass_3db_frequency", [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", [IIO_CHAN_INFO_FREQUENCY] = "frequency", [IIO_CHAN_INFO_PHASE] = "phase", [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain", [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis", [IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative", [IIO_CHAN_INFO_INT_TIME] = "integration_time", [IIO_CHAN_INFO_ENABLE] = "en", [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight", [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight", [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count", [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time", [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity", [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio", [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type", [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient", [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint", [IIO_CHAN_INFO_TROUGH] = "trough_raw", [IIO_CHAN_INFO_CONVDELAY] = "convdelay", [IIO_CHAN_INFO_POWERFACTOR] = "powerfactor", }; /** * iio_device_id() - query the unique ID for the device * @indio_dev: Device structure whose ID is being queried * * The IIO device ID is a unique index used for example for the naming * of the character device /dev/iio\:device[ID]. * * Returns: Unique ID for the device. */ int iio_device_id(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); return iio_dev_opaque->id; } EXPORT_SYMBOL_GPL(iio_device_id); /** * iio_buffer_enabled() - helper function to test if the buffer is enabled * @indio_dev: IIO device structure for device * * Returns: True, if the buffer is enabled. 
*/ bool iio_buffer_enabled(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); return iio_dev_opaque->currentmode & INDIO_ALL_BUFFER_MODES; } EXPORT_SYMBOL_GPL(iio_buffer_enabled); #if defined(CONFIG_DEBUG_FS) /* * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined */ struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); return iio_dev_opaque->debugfs_dentry; } EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry); #endif /** * iio_find_channel_from_si() - get channel from its scan index * @indio_dev: device * @si: scan index to match * * Returns: * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure. */ const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) { int i; for (i = 0; i < indio_dev->num_channels; i++) if (indio_dev->channels[i].scan_index == si) return &indio_dev->channels[i]; return NULL; } /* This turns up an awful lot */ ssize_t iio_read_const_attr(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string); } EXPORT_SYMBOL(iio_read_const_attr); /** * iio_device_set_clock() - Set current timestamping clock for the device * @indio_dev: IIO device structure containing the device * @clock_id: timestamping clock POSIX identifier to set. * * Returns: 0 on success, or a negative error code. */ int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) { int ret; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; ret = mutex_lock_interruptible(&iio_dev_opaque->mlock); if (ret) return ret; if ((ev_int && iio_event_enabled(ev_int)) || iio_buffer_enabled(indio_dev)) { mutex_unlock(&iio_dev_opaque->mlock); return -EBUSY; } iio_dev_opaque->clock_id = clock_id; mutex_unlock(&iio_dev_opaque->mlock); return 0; } EXPORT_SYMBOL(iio_device_set_clock); /** * iio_device_get_clock() - Retrieve current timestamping clock for the device * @indio_dev: IIO device structure containing the device * * Returns: Clock ID of the current timestamping clock for the device. */ clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); return iio_dev_opaque->clock_id; } EXPORT_SYMBOL(iio_device_get_clock); /** * iio_get_time_ns() - utility function to get a time stamp for events etc * @indio_dev: device * * Returns: Timestamp of the event in nanoseconds. 
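 *
 * The clock consulted here is the per-device timestamping clock selected via
 * iio_device_set_clock() (exposed to userspace through the
 * current_timestamp_clock attribute defined later in this file). A common,
 * illustrative use is stamping a sample in a trigger or event handler:
 *
 *	timestamp = iio_get_time_ns(indio_dev);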
*/ s64 iio_get_time_ns(const struct iio_dev *indio_dev) { struct timespec64 tp; switch (iio_device_get_clock(indio_dev)) { case CLOCK_REALTIME: return ktime_get_real_ns(); case CLOCK_MONOTONIC: return ktime_get_ns(); case CLOCK_MONOTONIC_RAW: return ktime_get_raw_ns(); case CLOCK_REALTIME_COARSE: return ktime_to_ns(ktime_get_coarse_real()); case CLOCK_MONOTONIC_COARSE: ktime_get_coarse_ts64(&tp); return timespec64_to_ns(&tp); case CLOCK_BOOTTIME: return ktime_get_boottime_ns(); case CLOCK_TAI: return ktime_get_clocktai_ns(); default: BUG(); } } EXPORT_SYMBOL(iio_get_time_ns); static int __init iio_init(void) { int ret; /* Register sysfs bus */ ret = bus_register(&iio_bus_type); if (ret < 0) { pr_err("could not register bus type\n"); goto error_nothing; } ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); if (ret < 0) { pr_err("failed to allocate char dev region\n"); goto error_unregister_bus_type; } iio_debugfs_dentry = debugfs_create_dir("iio", NULL); return 0; error_unregister_bus_type: bus_unregister(&iio_bus_type); error_nothing: return ret; } static void __exit iio_exit(void) { if (iio_devt) unregister_chrdev_region(iio_devt, IIO_DEV_MAX); bus_unregister(&iio_bus_type); debugfs_remove(iio_debugfs_dentry); } #if defined(CONFIG_DEBUG_FS) static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct iio_dev *indio_dev = file->private_data; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); unsigned int val = 0; int ret; if (*ppos > 0) return simple_read_from_buffer(userbuf, count, ppos, iio_dev_opaque->read_buf, iio_dev_opaque->read_buf_len); ret = indio_dev->info->debugfs_reg_access(indio_dev, iio_dev_opaque->cached_reg_addr, 0, &val); if (ret) { dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); return ret; } iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf, sizeof(iio_dev_opaque->read_buf), "0x%X\n", val); return simple_read_from_buffer(userbuf, count, ppos, iio_dev_opaque->read_buf, iio_dev_opaque->read_buf_len); } static ssize_t iio_debugfs_write_reg(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct iio_dev *indio_dev = file->private_data; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); unsigned int reg, val; char buf[80]; int ret; if (count >= sizeof(buf)) return -EINVAL; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count); if (ret < 0) return ret; buf[ret] = '\0'; ret = sscanf(buf, "%i %i", ®, &val); switch (ret) { case 1: iio_dev_opaque->cached_reg_addr = reg; break; case 2: iio_dev_opaque->cached_reg_addr = reg; ret = indio_dev->info->debugfs_reg_access(indio_dev, reg, val, NULL); if (ret) { dev_err(indio_dev->dev.parent, "%s: write failed\n", __func__); return ret; } break; default: return -EINVAL; } return count; } static const struct file_operations iio_debugfs_reg_fops = { .open = simple_open, .read = iio_debugfs_read_reg, .write = iio_debugfs_write_reg, }; static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry); } static void iio_device_register_debugfs(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque; if (indio_dev->info->debugfs_reg_access == NULL) return; if (!iio_debugfs_dentry) return; iio_dev_opaque = to_iio_dev_opaque(indio_dev); iio_dev_opaque->debugfs_dentry = debugfs_create_dir(dev_name(&indio_dev->dev), iio_debugfs_dentry); 
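	/*
	 * The "direct_reg_access" file created below is backed by the debugfs
	 * read/write handlers above: writing a single number caches a register
	 * address, writing two numbers ("<reg> <val>") performs a register
	 * write, and reading returns the value at the cached address. An
	 * illustrative userspace session (the device name is an assumption):
	 *
	 *	echo 0x10 > /sys/kernel/debug/iio/iio:device0/direct_reg_access
	 *	cat /sys/kernel/debug/iio/iio:device0/direct_reg_access
	 */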
debugfs_create_file("direct_reg_access", 0644, iio_dev_opaque->debugfs_dentry, indio_dev, &iio_debugfs_reg_fops); } #else static void iio_device_register_debugfs(struct iio_dev *indio_dev) { } static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) { } #endif /* CONFIG_DEBUG_FS */ static ssize_t iio_read_channel_ext_info(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); const struct iio_chan_spec_ext_info *ext_info; ext_info = &this_attr->c->ext_info[this_attr->address]; return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf); } static ssize_t iio_write_channel_ext_info(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); const struct iio_chan_spec_ext_info *ext_info; ext_info = &this_attr->c->ext_info[this_attr->address]; return ext_info->write(indio_dev, ext_info->private, this_attr->c, buf, len); } ssize_t iio_enum_available_read(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf) { const struct iio_enum *e = (const struct iio_enum *)priv; unsigned int i; size_t len = 0; if (!e->num_items) return 0; for (i = 0; i < e->num_items; ++i) { if (!e->items[i]) continue; len += sysfs_emit_at(buf, len, "%s ", e->items[i]); } /* replace last space with a newline */ buf[len - 1] = '\n'; return len; } EXPORT_SYMBOL_GPL(iio_enum_available_read); ssize_t iio_enum_read(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf) { const struct iio_enum *e = (const struct iio_enum *)priv; int i; if (!e->get) return -EINVAL; i = e->get(indio_dev, chan); if (i < 0) return i; if (i >= e->num_items || !e->items[i]) return -EINVAL; return sysfs_emit(buf, "%s\n", e->items[i]); } EXPORT_SYMBOL_GPL(iio_enum_read); ssize_t iio_enum_write(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, size_t len) { const struct iio_enum *e = (const struct iio_enum *)priv; int ret; if (!e->set) return -EINVAL; ret = __sysfs_match_string(e->items, e->num_items, buf); if (ret < 0) return ret; ret = e->set(indio_dev, chan, ret); return ret ? 
ret : len; } EXPORT_SYMBOL_GPL(iio_enum_write); static const struct iio_mount_matrix iio_mount_idmatrix = { .rotation = { "1", "0", "0", "0", "1", "0", "0", "0", "1" } }; static int iio_setup_mount_idmatrix(const struct device *dev, struct iio_mount_matrix *matrix) { *matrix = iio_mount_idmatrix; dev_info(dev, "mounting matrix not found: using identity...\n"); return 0; } ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf) { const struct iio_mount_matrix *mtx; mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan); if (IS_ERR(mtx)) return PTR_ERR(mtx); if (!mtx) mtx = &iio_mount_idmatrix; return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n", mtx->rotation[0], mtx->rotation[1], mtx->rotation[2], mtx->rotation[3], mtx->rotation[4], mtx->rotation[5], mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]); } EXPORT_SYMBOL_GPL(iio_show_mount_matrix); /** * iio_read_mount_matrix() - retrieve iio device mounting matrix from * device "mount-matrix" property * @dev: device the mounting matrix property is assigned to * @matrix: where to store retrieved matrix * * If device is assigned no mounting matrix property, a default 3x3 identity * matrix will be filled in. * * Returns: 0 if success, or a negative error code on failure. */ int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix) { size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation); int err; err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len); if (err == len) return 0; if (err >= 0) /* Invalid number of matrix entries. */ return -EINVAL; if (err != -EINVAL) /* Invalid matrix declaration format. */ return err; /* Matrix was not declared at all: fallback to identity. */ return iio_setup_mount_idmatrix(dev, matrix); } EXPORT_SYMBOL(iio_read_mount_matrix); static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type, int size, const int *vals) { int tmp0, tmp1; s64 tmp2; bool scale_db = false; switch (type) { case IIO_VAL_INT: return sysfs_emit_at(buf, offset, "%d", vals[0]); case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; fallthrough; case IIO_VAL_INT_PLUS_MICRO: if (vals[1] < 0) return sysfs_emit_at(buf, offset, "-%d.%06u%s", abs(vals[0]), -vals[1], scale_db ? " dB" : ""); else return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0], vals[1], scale_db ? 
" dB" : ""); case IIO_VAL_INT_PLUS_NANO: if (vals[1] < 0) return sysfs_emit_at(buf, offset, "-%d.%09u", abs(vals[0]), -vals[1]); else return sysfs_emit_at(buf, offset, "%d.%09u", vals[0], vals[1]); case IIO_VAL_FRACTIONAL: tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]); tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1); if ((tmp2 < 0) && (tmp0 == 0)) return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); else return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, abs(tmp1)); case IIO_VAL_FRACTIONAL_LOG2: tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]); tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1); if (tmp0 == 0 && tmp2 < 0) return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); else return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, abs(tmp1)); case IIO_VAL_INT_MULTIPLE: { int i; int l = 0; for (i = 0; i < size; ++i) l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]); return l; } case IIO_VAL_CHAR: return sysfs_emit_at(buf, offset, "%c", (char)vals[0]); case IIO_VAL_INT_64: tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]); return sysfs_emit_at(buf, offset, "%lld", tmp2); default: return 0; } } /** * iio_format_value() - Formats a IIO value into its string representation * @buf: The buffer to which the formatted value gets written * which is assumed to be big enough (i.e. PAGE_SIZE). * @type: One of the IIO_VAL_* constants. This decides how the val * and val2 parameters are formatted. * @size: Number of IIO value entries contained in vals * @vals: Pointer to the values, exact meaning depends on the * type parameter. * * Returns: * 0 by default, a negative number on failure or the total number of characters * written for a type that belongs to the IIO_VAL_* constant. */ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) { ssize_t len; len = __iio_format_value(buf, 0, type, size, vals); if (len >= PAGE_SIZE - 1) return -EFBIG; return len + sysfs_emit_at(buf, len, "\n"); } EXPORT_SYMBOL_GPL(iio_format_value); ssize_t do_iio_read_channel_label(struct iio_dev *indio_dev, const struct iio_chan_spec *c, char *buf) { if (indio_dev->info->read_label) return indio_dev->info->read_label(indio_dev, c, buf); if (c->extend_name) return sysfs_emit(buf, "%s\n", c->extend_name); return -EINVAL; } static ssize_t iio_read_channel_label(struct device *dev, struct device_attribute *attr, char *buf) { return do_iio_read_channel_label(dev_to_iio_dev(dev), to_iio_dev_attr(attr)->c, buf); } static ssize_t iio_read_channel_info(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int vals[INDIO_MAX_RAW_ELEMENTS]; int ret; int val_len = 2; if (indio_dev->info->read_raw_multi) ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, INDIO_MAX_RAW_ELEMENTS, vals, &val_len, this_attr->address); else if (indio_dev->info->read_raw) ret = indio_dev->info->read_raw(indio_dev, this_attr->c, &vals[0], &vals[1], this_attr->address); else return -EINVAL; if (ret < 0) return ret; return iio_format_value(buf, ret, val_len, vals); } static ssize_t iio_format_list(char *buf, const int *vals, int type, int length, const char *prefix, const char *suffix) { ssize_t len; int stride; int i; switch (type) { case IIO_VAL_INT: case IIO_VAL_CHAR: stride = 1; break; default: stride = 2; break; } len = sysfs_emit(buf, prefix); for (i = 0; i <= length - stride; i += stride) { if (i != 0) { len += sysfs_emit_at(buf, len, " "); if (len >= PAGE_SIZE) return -EFBIG; } len += 
__iio_format_value(buf, len, type, stride, &vals[i]); if (len >= PAGE_SIZE) return -EFBIG; } len += sysfs_emit_at(buf, len, "%s\n", suffix); return len; } static ssize_t iio_format_avail_list(char *buf, const int *vals, int type, int length) { return iio_format_list(buf, vals, type, length, "", ""); } static ssize_t iio_format_avail_range(char *buf, const int *vals, int type) { int length; /* * length refers to the array size , not the number of elements. * The purpose is to print the range [min , step ,max] so length should * be 3 in case of int, and 6 for other types. */ switch (type) { case IIO_VAL_INT: length = 3; break; default: length = 6; break; } return iio_format_list(buf, vals, type, length, "[", "]"); } static ssize_t iio_read_channel_info_avail(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); const int *vals; int ret; int length; int type; if (!indio_dev->info->read_avail) return -EINVAL; ret = indio_dev->info->read_avail(indio_dev, this_attr->c, &vals, &type, &length, this_attr->address); if (ret < 0) return ret; switch (ret) { case IIO_AVAIL_LIST: return iio_format_avail_list(buf, vals, type, length); case IIO_AVAIL_RANGE: return iio_format_avail_range(buf, vals, type); default: return -EINVAL; } } /** * __iio_str_to_fixpoint() - Parse a fixed-point number from a string * @str: The string to parse * @fract_mult: Multiplier for the first decimal place, should be a power of 10 * @integer: The integer part of the number * @fract: The fractional part of the number * @scale_db: True if this should parse as dB * * Returns: * 0 on success, or a negative error code if the string could not be parsed. */ static int __iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, int *fract, bool scale_db) { int i = 0, f = 0; bool integer_part = true, negative = false; if (fract_mult == 0) { *fract = 0; return kstrtoint(str, 0, integer); } if (str[0] == '-') { negative = true; str++; } else if (str[0] == '+') { str++; } while (*str) { if ('0' <= *str && *str <= '9') { if (integer_part) { i = i * 10 + *str - '0'; } else { f += fract_mult * (*str - '0'); fract_mult /= 10; } } else if (*str == '\n') { if (*(str + 1) == '\0') break; return -EINVAL; } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) { /* Ignore the dB suffix */ str += sizeof(" dB") - 1; continue; } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) { /* Ignore the dB suffix */ str += sizeof("dB") - 1; continue; } else if (*str == '.' && integer_part) { integer_part = false; } else { return -EINVAL; } str++; } if (negative) { if (i) i = -i; else f = -f; } *integer = i; *fract = f; return 0; } /** * iio_str_to_fixpoint() - Parse a fixed-point number from a string * @str: The string to parse * @fract_mult: Multiplier for the first decimal place, should be a power of 10 * @integer: The integer part of the number * @fract: The fractional part of the number * * Returns: * 0 on success, or a negative error code if the string could not be parsed. 
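 *
 * Worked example (following the parser above): parsing "1.5\n" with
 * @fract_mult = 100000, i.e. IIO_VAL_INT_PLUS_MICRO precision, yields
 * *integer = 1 and *fract = 500000. With @fract_mult = 0 the string is
 * instead parsed as a plain integer via kstrtoint() and *fract is set to 0.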
*/ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, int *fract) { return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false); } EXPORT_SYMBOL_GPL(iio_str_to_fixpoint); static ssize_t iio_write_channel_info(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret, fract_mult = 100000; int integer, fract = 0; long long integer64; bool is_char = false; bool scale_db = false; bool is_64bit = false; /* Assumes decimal - precision based on number of digits */ if (!indio_dev->info->write_raw) return -EINVAL; if (indio_dev->info->write_raw_get_fmt) switch (indio_dev->info->write_raw_get_fmt(indio_dev, this_attr->c, this_attr->address)) { case IIO_VAL_INT: fract_mult = 0; break; case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; fallthrough; case IIO_VAL_INT_PLUS_MICRO: fract_mult = 100000; break; case IIO_VAL_INT_PLUS_NANO: fract_mult = 100000000; break; case IIO_VAL_CHAR: is_char = true; break; case IIO_VAL_INT_64: is_64bit = true; break; default: return -EINVAL; } if (is_char) { char ch; if (sscanf(buf, "%c", &ch) != 1) return -EINVAL; integer = ch; } else if (is_64bit) { ret = kstrtoll(buf, 0, &integer64); if (ret) return ret; fract = upper_32_bits(integer64); integer = lower_32_bits(integer64); } else { ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, scale_db); if (ret) return ret; } ret = indio_dev->info->write_raw(indio_dev, this_attr->c, integer, fract, this_attr->address); if (ret) return ret; return len; } static int __iio_device_attr_init(struct device_attribute *dev_attr, const char *postfix, struct iio_chan_spec const *chan, ssize_t (*readfunc)(struct device *dev, struct device_attribute *attr, char *buf), ssize_t (*writefunc)(struct device *dev, struct device_attribute *attr, const char *buf, size_t len), enum iio_shared_by shared_by) { int ret = 0; char *name = NULL; char *full_postfix; sysfs_attr_init(&dev_attr->attr); /* Build up postfix of <extend_name>_<modifier>_postfix */ if (chan->modified && (shared_by == IIO_SEPARATE)) { if (chan->extend_name) full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", iio_modifier_names[chan->channel2], chan->extend_name, postfix); else full_postfix = kasprintf(GFP_KERNEL, "%s_%s", iio_modifier_names[chan->channel2], postfix); } else { if (chan->extend_name == NULL || shared_by != IIO_SEPARATE) full_postfix = kstrdup(postfix, GFP_KERNEL); else full_postfix = kasprintf(GFP_KERNEL, "%s_%s", chan->extend_name, postfix); } if (full_postfix == NULL) return -ENOMEM; if (chan->differential) { /* Differential can not have modifier */ switch (shared_by) { case IIO_SHARED_BY_ALL: name = kasprintf(GFP_KERNEL, "%s", full_postfix); break; case IIO_SHARED_BY_DIR: name = kasprintf(GFP_KERNEL, "%s_%s", iio_direction[chan->output], full_postfix); break; case IIO_SHARED_BY_TYPE: name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s", iio_direction[chan->output], iio_chan_type_name_spec[chan->type], iio_chan_type_name_spec[chan->type], full_postfix); break; case IIO_SEPARATE: if (!chan->indexed) { WARN(1, "Differential channels must be indexed\n"); ret = -EINVAL; goto error_free_full_postfix; } name = kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s", iio_direction[chan->output], iio_chan_type_name_spec[chan->type], chan->channel, iio_chan_type_name_spec[chan->type], chan->channel2, full_postfix); break; } } else { /* Single ended */ switch (shared_by) { case IIO_SHARED_BY_ALL: name = kasprintf(GFP_KERNEL, "%s", 
full_postfix); break; case IIO_SHARED_BY_DIR: name = kasprintf(GFP_KERNEL, "%s_%s", iio_direction[chan->output], full_postfix); break; case IIO_SHARED_BY_TYPE: name = kasprintf(GFP_KERNEL, "%s_%s_%s", iio_direction[chan->output], iio_chan_type_name_spec[chan->type], full_postfix); break; case IIO_SEPARATE: if (chan->indexed) name = kasprintf(GFP_KERNEL, "%s_%s%d_%s", iio_direction[chan->output], iio_chan_type_name_spec[chan->type], chan->channel, full_postfix); else name = kasprintf(GFP_KERNEL, "%s_%s_%s", iio_direction[chan->output], iio_chan_type_name_spec[chan->type], full_postfix); break; } } if (name == NULL) { ret = -ENOMEM; goto error_free_full_postfix; } dev_attr->attr.name = name; if (readfunc) { dev_attr->attr.mode |= 0444; dev_attr->show = readfunc; } if (writefunc) { dev_attr->attr.mode |= 0200; dev_attr->store = writefunc; } error_free_full_postfix: kfree(full_postfix); return ret; } static void __iio_device_attr_deinit(struct device_attribute *dev_attr) { kfree(dev_attr->attr.name); } int __iio_add_chan_devattr(const char *postfix, struct iio_chan_spec const *chan, ssize_t (*readfunc)(struct device *dev, struct device_attribute *attr, char *buf), ssize_t (*writefunc)(struct device *dev, struct device_attribute *attr, const char *buf, size_t len), u64 mask, enum iio_shared_by shared_by, struct device *dev, struct iio_buffer *buffer, struct list_head *attr_list) { int ret; struct iio_dev_attr *iio_attr, *t; iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL); if (iio_attr == NULL) return -ENOMEM; ret = __iio_device_attr_init(&iio_attr->dev_attr, postfix, chan, readfunc, writefunc, shared_by); if (ret) goto error_iio_dev_attr_free; iio_attr->c = chan; iio_attr->address = mask; iio_attr->buffer = buffer; list_for_each_entry(t, attr_list, l) if (strcmp(t->dev_attr.attr.name, iio_attr->dev_attr.attr.name) == 0) { if (shared_by == IIO_SEPARATE) dev_err(dev, "tried to double register : %s\n", t->dev_attr.attr.name); ret = -EBUSY; goto error_device_attr_deinit; } list_add(&iio_attr->l, attr_list); return 0; error_device_attr_deinit: __iio_device_attr_deinit(&iio_attr->dev_attr); error_iio_dev_attr_free: kfree(iio_attr); return ret; } static int iio_device_add_channel_label(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int ret; if (!indio_dev->info->read_label && !chan->extend_name) return 0; ret = __iio_add_chan_devattr("label", chan, &iio_read_channel_label, NULL, 0, IIO_SEPARATE, &indio_dev->dev, NULL, &iio_dev_opaque->channel_attr_list); if (ret < 0) return ret; return 1; } static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, enum iio_shared_by shared_by, const unsigned long *infomask) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int i, ret, attrcount = 0; for_each_set_bit(i, infomask, sizeof(*infomask)*8) { if (i >= ARRAY_SIZE(iio_chan_info_postfix)) return -EINVAL; ret = __iio_add_chan_devattr(iio_chan_info_postfix[i], chan, &iio_read_channel_info, &iio_write_channel_info, i, shared_by, &indio_dev->dev, NULL, &iio_dev_opaque->channel_attr_list); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; if (ret < 0) return ret; attrcount++; } return attrcount; } static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, enum iio_shared_by shared_by, const unsigned long *infomask) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int i, ret, 
attrcount = 0; char *avail_postfix; for_each_set_bit(i, infomask, sizeof(*infomask) * 8) { if (i >= ARRAY_SIZE(iio_chan_info_postfix)) return -EINVAL; avail_postfix = kasprintf(GFP_KERNEL, "%s_available", iio_chan_info_postfix[i]); if (!avail_postfix) return -ENOMEM; ret = __iio_add_chan_devattr(avail_postfix, chan, &iio_read_channel_info_avail, NULL, i, shared_by, &indio_dev->dev, NULL, &iio_dev_opaque->channel_attr_list); kfree(avail_postfix); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; if (ret < 0) return ret; attrcount++; } return attrcount; } static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int ret, attrcount = 0; const struct iio_chan_spec_ext_info *ext_info; if (chan->channel < 0) return 0; ret = iio_device_add_info_mask_type(indio_dev, chan, IIO_SEPARATE, &chan->info_mask_separate); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type_avail(indio_dev, chan, IIO_SEPARATE, &chan->info_mask_separate_available); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type(indio_dev, chan, IIO_SHARED_BY_TYPE, &chan->info_mask_shared_by_type); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type_avail(indio_dev, chan, IIO_SHARED_BY_TYPE, &chan->info_mask_shared_by_type_available); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type(indio_dev, chan, IIO_SHARED_BY_DIR, &chan->info_mask_shared_by_dir); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type_avail(indio_dev, chan, IIO_SHARED_BY_DIR, &chan->info_mask_shared_by_dir_available); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type(indio_dev, chan, IIO_SHARED_BY_ALL, &chan->info_mask_shared_by_all); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_info_mask_type_avail(indio_dev, chan, IIO_SHARED_BY_ALL, &chan->info_mask_shared_by_all_available); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_channel_label(indio_dev, chan); if (ret < 0) return ret; attrcount += ret; if (chan->ext_info) { unsigned int i = 0; for (ext_info = chan->ext_info; ext_info->name; ext_info++) { ret = __iio_add_chan_devattr(ext_info->name, chan, ext_info->read ? &iio_read_channel_ext_info : NULL, ext_info->write ? &iio_write_channel_ext_info : NULL, i, ext_info->shared, &indio_dev->dev, NULL, &iio_dev_opaque->channel_attr_list); i++; if (ret == -EBUSY && ext_info->shared) continue; if (ret) return ret; attrcount++; } } return attrcount; } /** * iio_free_chan_devattr_list() - Free a list of IIO device attributes * @attr_list: List of IIO device attributes * * This function frees the memory allocated for each of the IIO device * attributes in the list. 
*/ void iio_free_chan_devattr_list(struct list_head *attr_list) { struct iio_dev_attr *p, *n; list_for_each_entry_safe(p, n, attr_list, l) { kfree_const(p->dev_attr.attr.name); list_del(&p->l); kfree(p); } } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); return sysfs_emit(buf, "%s\n", indio_dev->name); } static DEVICE_ATTR_RO(name); static ssize_t label_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); return sysfs_emit(buf, "%s\n", indio_dev->label); } static DEVICE_ATTR_RO(label); static const char * const clock_names[] = { [CLOCK_REALTIME] = "realtime", [CLOCK_MONOTONIC] = "monotonic", [CLOCK_PROCESS_CPUTIME_ID] = "process_cputime_id", [CLOCK_THREAD_CPUTIME_ID] = "thread_cputime_id", [CLOCK_MONOTONIC_RAW] = "monotonic_raw", [CLOCK_REALTIME_COARSE] = "realtime_coarse", [CLOCK_MONOTONIC_COARSE] = "monotonic_coarse", [CLOCK_BOOTTIME] = "boottime", [CLOCK_REALTIME_ALARM] = "realtime_alarm", [CLOCK_BOOTTIME_ALARM] = "boottime_alarm", [CLOCK_SGI_CYCLE] = "sgi_cycle", [CLOCK_TAI] = "tai", }; static ssize_t current_timestamp_clock_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct iio_dev *indio_dev = dev_to_iio_dev(dev); const clockid_t clk = iio_device_get_clock(indio_dev); switch (clk) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_MONOTONIC_RAW: case CLOCK_REALTIME_COARSE: case CLOCK_MONOTONIC_COARSE: case CLOCK_BOOTTIME: case CLOCK_TAI: break; default: BUG(); } return sysfs_emit(buf, "%s\n", clock_names[clk]); } static ssize_t current_timestamp_clock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { clockid_t clk; int ret; ret = sysfs_match_string(clock_names, buf); if (ret < 0) return ret; clk = ret; switch (clk) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_MONOTONIC_RAW: case CLOCK_REALTIME_COARSE: case CLOCK_MONOTONIC_COARSE: case CLOCK_BOOTTIME: case CLOCK_TAI: break; default: return -EINVAL; } ret = iio_device_set_clock(dev_to_iio_dev(dev), clk); if (ret) return ret; return len; } int iio_device_register_sysfs_group(struct iio_dev *indio_dev, const struct attribute_group *group) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); const struct attribute_group **new, **old = iio_dev_opaque->groups; unsigned int cnt = iio_dev_opaque->groupcounter; new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; new[iio_dev_opaque->groupcounter++] = group; new[iio_dev_opaque->groupcounter] = NULL; iio_dev_opaque->groups = new; return 0; } static DEVICE_ATTR_RW(current_timestamp_clock); static int iio_device_register_sysfs(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int i, ret = 0, attrcount, attrn, attrcount_orig = 0; struct iio_dev_attr *p; struct attribute **attr, *clk = NULL; /* First count elements in any existing group */ if (indio_dev->info->attrs) { attr = indio_dev->info->attrs->attrs; while (*attr++ != NULL) attrcount_orig++; } attrcount = attrcount_orig; /* * New channel registration method - relies on the fact a group does * not need to be initialized if its name is NULL. 
*/ if (indio_dev->channels) for (i = 0; i < indio_dev->num_channels; i++) { const struct iio_chan_spec *chan = &indio_dev->channels[i]; if (chan->type == IIO_TIMESTAMP) clk = &dev_attr_current_timestamp_clock.attr; ret = iio_device_add_channel_sysfs(indio_dev, chan); if (ret < 0) goto error_clear_attrs; attrcount += ret; } if (iio_dev_opaque->event_interface) clk = &dev_attr_current_timestamp_clock.attr; if (indio_dev->name) attrcount++; if (indio_dev->label) attrcount++; if (clk) attrcount++; iio_dev_opaque->chan_attr_group.attrs = kcalloc(attrcount + 1, sizeof(iio_dev_opaque->chan_attr_group.attrs[0]), GFP_KERNEL); if (iio_dev_opaque->chan_attr_group.attrs == NULL) { ret = -ENOMEM; goto error_clear_attrs; } /* Copy across original attributes, and point to original binary attributes */ if (indio_dev->info->attrs) { memcpy(iio_dev_opaque->chan_attr_group.attrs, indio_dev->info->attrs->attrs, sizeof(iio_dev_opaque->chan_attr_group.attrs[0]) *attrcount_orig); iio_dev_opaque->chan_attr_group.is_visible = indio_dev->info->attrs->is_visible; iio_dev_opaque->chan_attr_group.bin_attrs = indio_dev->info->attrs->bin_attrs; } attrn = attrcount_orig; /* Add all elements from the list. */ list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l) iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; if (indio_dev->name) iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; if (indio_dev->label) iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; if (clk) iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk; ret = iio_device_register_sysfs_group(indio_dev, &iio_dev_opaque->chan_attr_group); if (ret) goto error_free_chan_attrs; return 0; error_free_chan_attrs: kfree(iio_dev_opaque->chan_attr_group.attrs); iio_dev_opaque->chan_attr_group.attrs = NULL; error_clear_attrs: iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); return ret; } static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); kfree(iio_dev_opaque->chan_attr_group.attrs); iio_dev_opaque->chan_attr_group.attrs = NULL; kfree(iio_dev_opaque->groups); iio_dev_opaque->groups = NULL; } static void iio_dev_release(struct device *device) { struct iio_dev *indio_dev = dev_to_iio_dev(device); struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) iio_device_unregister_trigger_consumer(indio_dev); iio_device_unregister_eventset(indio_dev); iio_device_unregister_sysfs(indio_dev); iio_device_detach_buffers(indio_dev); mutex_destroy(&iio_dev_opaque->info_exist_lock); mutex_destroy(&iio_dev_opaque->mlock); lockdep_unregister_key(&iio_dev_opaque->mlock_key); ida_free(&iio_ida, iio_dev_opaque->id); kfree(iio_dev_opaque); } const struct device_type iio_device_type = { .name = "iio_device", .release = iio_dev_release, }; /** * iio_device_alloc() - allocate an iio_dev from a driver * @parent: Parent device. * @sizeof_priv: Space to allocate for private structure. * * Returns: * Pointer to allocated iio_dev on success, NULL on failure. 
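 *
 * A brief, illustrative call sequence (struct foo_state is a hypothetical
 * driver private structure; the allocation is paired with iio_device_free()
 * unless the devm_ variant below is used):
 *
 *	indio_dev = iio_device_alloc(parent, sizeof(struct foo_state));
 *	if (!indio_dev)
 *		return -ENOMEM;
 *	st = iio_priv(indio_dev);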
*/ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) { struct iio_dev_opaque *iio_dev_opaque; struct iio_dev *indio_dev; size_t alloc_size; if (sizeof_priv) alloc_size = ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN) + sizeof_priv; else alloc_size = sizeof(*iio_dev_opaque); iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL); if (!iio_dev_opaque) return NULL; indio_dev = &iio_dev_opaque->indio_dev; if (sizeof_priv) ACCESS_PRIVATE(indio_dev, priv) = (char *)iio_dev_opaque + ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN); INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL); if (iio_dev_opaque->id < 0) { /* cannot use a dev_err as the name isn't available */ pr_err("failed to get device id\n"); kfree(iio_dev_opaque); return NULL; } if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) { ida_free(&iio_ida, iio_dev_opaque->id); kfree(iio_dev_opaque); return NULL; } INIT_LIST_HEAD(&iio_dev_opaque->buffer_list); INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers); lockdep_register_key(&iio_dev_opaque->mlock_key); mutex_init_with_key(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key); mutex_init(&iio_dev_opaque->info_exist_lock); indio_dev->dev.parent = parent; indio_dev->dev.type = &iio_device_type; indio_dev->dev.bus = &iio_bus_type; device_initialize(&indio_dev->dev); return indio_dev; } EXPORT_SYMBOL(iio_device_alloc); /** * iio_device_free() - free an iio_dev from a driver * @dev: the iio_dev associated with the device */ void iio_device_free(struct iio_dev *dev) { if (dev) put_device(&dev->dev); } EXPORT_SYMBOL(iio_device_free); static void devm_iio_device_release(void *iio_dev) { iio_device_free(iio_dev); } /** * devm_iio_device_alloc - Resource-managed iio_device_alloc() * @parent: Device to allocate iio_dev for, and parent for this IIO device * @sizeof_priv: Space to allocate for private structure. * * Managed iio_device_alloc. iio_dev allocated with this function is * automatically freed on driver detach. * * Returns: * Pointer to allocated iio_dev on success, NULL on failure. 
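 *
 * Illustrative probe() usage (names are assumptions): the allocation needs
 * no explicit free, and registration is typically also device-managed:
 *
 *	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_state));
 *	if (!indio_dev)
 *		return -ENOMEM;
 *	...
 *	return devm_iio_device_register(&pdev->dev, indio_dev);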
*/ struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) { struct iio_dev *iio_dev; int ret; iio_dev = iio_device_alloc(parent, sizeof_priv); if (!iio_dev) return NULL; ret = devm_add_action_or_reset(parent, devm_iio_device_release, iio_dev); if (ret) return NULL; return iio_dev; } EXPORT_SYMBOL_GPL(devm_iio_device_alloc); /** * iio_chrdev_open() - chrdev file open for buffer access and ioctls * @inode: Inode structure for identifying the device in the file system * @filp: File structure for iio device used to keep and later access * private data * * Returns: 0 on success or -EBUSY if the device is already opened */ static int iio_chrdev_open(struct inode *inode, struct file *filp) { struct iio_dev_opaque *iio_dev_opaque = container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; struct iio_dev_buffer_pair *ib; if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags)) return -EBUSY; iio_device_get(indio_dev); ib = kmalloc(sizeof(*ib), GFP_KERNEL); if (!ib) { iio_device_put(indio_dev); clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); return -ENOMEM; } ib->indio_dev = indio_dev; ib->buffer = indio_dev->buffer; filp->private_data = ib; return 0; } /** * iio_chrdev_release() - chrdev file close buffer access and ioctls * @inode: Inode structure pointer for the char device * @filp: File structure pointer for the char device * * Returns: 0 for successful release. */ static int iio_chrdev_release(struct inode *inode, struct file *filp) { struct iio_dev_buffer_pair *ib = filp->private_data; struct iio_dev_opaque *iio_dev_opaque = container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; kfree(ib); clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); iio_device_put(indio_dev); return 0; } void iio_device_ioctl_handler_register(struct iio_dev *indio_dev, struct iio_ioctl_handler *h) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers); } void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h) { list_del(&h->entry); } static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct iio_dev_buffer_pair *ib = filp->private_data; struct iio_dev *indio_dev = ib->indio_dev; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_ioctl_handler *h; int ret; guard(mutex)(&iio_dev_opaque->info_exist_lock); /* * The NULL check here is required to prevent crashing when a device * is being removed while userspace would still have open file handles * to try to access this device. 
*/ if (!indio_dev->info) return -ENODEV; list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) { ret = h->ioctl(indio_dev, filp, cmd, arg); if (ret != IIO_IOCTL_UNHANDLED) return ret; } return -ENODEV; } static const struct file_operations iio_buffer_fileops = { .owner = THIS_MODULE, .llseek = noop_llseek, .read = iio_buffer_read_outer_addr, .write = iio_buffer_write_outer_addr, .poll = iio_buffer_poll_addr, .unlocked_ioctl = iio_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = iio_chrdev_open, .release = iio_chrdev_release, }; static const struct file_operations iio_event_fileops = { .owner = THIS_MODULE, .llseek = noop_llseek, .unlocked_ioctl = iio_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = iio_chrdev_open, .release = iio_chrdev_release, }; static int iio_check_unique_scan_index(struct iio_dev *indio_dev) { int i, j; const struct iio_chan_spec *channels = indio_dev->channels; if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES)) return 0; for (i = 0; i < indio_dev->num_channels - 1; i++) { if (channels[i].scan_index < 0) continue; for (j = i + 1; j < indio_dev->num_channels; j++) if (channels[i].scan_index == channels[j].scan_index) { dev_err(&indio_dev->dev, "Duplicate scan index %d\n", channels[i].scan_index); return -EINVAL; } } return 0; } static int iio_check_extended_name(const struct iio_dev *indio_dev) { unsigned int i; if (!indio_dev->info->read_label) return 0; for (i = 0; i < indio_dev->num_channels; i++) { if (indio_dev->channels[i].extend_name) { dev_err(&indio_dev->dev, "Cannot use labels and extend_name at the same time\n"); return -EINVAL; } } return 0; } static const struct iio_buffer_setup_ops noop_ring_setup_ops; static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev) { unsigned int num_masks, masklength, longs_per_mask; const unsigned long *av_masks; int i; av_masks = indio_dev->available_scan_masks; masklength = iio_get_masklength(indio_dev); longs_per_mask = BITS_TO_LONGS(masklength); /* * The code determining how many available_scan_masks is in the array * will be assuming the end of masks when first long with all bits * zeroed is encountered. This is incorrect for masks where mask * consists of more than one long, and where some of the available masks * has long worth of bits zeroed (but has subsequent bit(s) set). This * is a safety measure against bug where array of masks is terminated by * a single zero while mask width is greater than width of a long. */ if (longs_per_mask > 1) dev_warn(indio_dev->dev.parent, "multi long available scan masks not fully supported\n"); if (bitmap_empty(av_masks, masklength)) dev_warn(indio_dev->dev.parent, "empty scan mask\n"); for (num_masks = 0; *av_masks; num_masks++) av_masks += longs_per_mask; if (num_masks < 2) return; av_masks = indio_dev->available_scan_masks; /* * Go through all the masks from first to one before the last, and see * that no mask found later from the available_scan_masks array is a * subset of mask found earlier. If this happens, then the mask found * later will never get used because scanning the array is stopped when * the first suitable mask is found. Drivers should order the array of * available masks in the order of preference (presumably the least * costy to access masks first). 
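	 *
	 * Concrete example: with available_scan_masks = { 0x0f, 0x03, 0 } the
	 * second mask (channels 0-1) is a subset of the first (channels 0-3),
	 * so any scan that fits 0x03 also matches 0x0f first and 0x03 is
	 * never selected; listing 0x03 before 0x0f avoids that.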
*/ for (i = 0; i < num_masks - 1; i++) { const unsigned long *mask1; int j; mask1 = av_masks + i * longs_per_mask; for (j = i + 1; j < num_masks; j++) { const unsigned long *mask2; mask2 = av_masks + j * longs_per_mask; if (bitmap_subset(mask2, mask1, masklength)) dev_warn(indio_dev->dev.parent, "available_scan_mask %d subset of %d. Never used\n", j, i); } } } /** * iio_active_scan_mask_index - Get index of the active scan mask inside the * available scan masks array * @indio_dev: the IIO device containing the active and available scan masks * * Returns: the index or -EINVAL if active_scan_mask is not set */ int iio_active_scan_mask_index(struct iio_dev *indio_dev) { const unsigned long *av_masks; unsigned int masklength = iio_get_masklength(indio_dev); int i = 0; if (!indio_dev->active_scan_mask) return -EINVAL; /* * As in iio_scan_mask_match and iio_sanity_check_avail_scan_masks, * the condition here do not handle multi-long masks correctly. * It only checks the first long to be zero, and will use such mask * as a terminator even if there was bits set after the first long. * * This should be fine since the available_scan_mask has already been * sanity tested using iio_sanity_check_avail_scan_masks. * * See iio_scan_mask_match and iio_sanity_check_avail_scan_masks for * more details */ av_masks = indio_dev->available_scan_masks; while (*av_masks) { if (indio_dev->active_scan_mask == av_masks) return i; av_masks += BITS_TO_LONGS(masklength); i++; } dev_warn(indio_dev->dev.parent, "active scan mask is not part of the available scan masks\n"); return -EINVAL; } EXPORT_SYMBOL_GPL(iio_active_scan_mask_index); int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct fwnode_handle *fwnode = NULL; int ret; if (!indio_dev->info) return -EINVAL; iio_dev_opaque->driver_module = this_mod; /* If the calling driver did not initialize firmware node, do it here */ if (dev_fwnode(&indio_dev->dev)) fwnode = dev_fwnode(&indio_dev->dev); /* The default dummy IIO device has no parent */ else if (indio_dev->dev.parent) fwnode = dev_fwnode(indio_dev->dev.parent); device_set_node(&indio_dev->dev, fwnode); fwnode_property_read_string(fwnode, "label", &indio_dev->label); ret = iio_check_unique_scan_index(indio_dev); if (ret < 0) return ret; ret = iio_check_extended_name(indio_dev); if (ret < 0) return ret; iio_device_register_debugfs(indio_dev); ret = iio_buffers_alloc_sysfs_and_mask(indio_dev); if (ret) { dev_err(indio_dev->dev.parent, "Failed to create buffer sysfs interfaces\n"); goto error_unreg_debugfs; } if (indio_dev->available_scan_masks) iio_sanity_check_avail_scan_masks(indio_dev); ret = iio_device_register_sysfs(indio_dev); if (ret) { dev_err(indio_dev->dev.parent, "Failed to register sysfs interfaces\n"); goto error_buffer_free_sysfs; } ret = iio_device_register_eventset(indio_dev); if (ret) { dev_err(indio_dev->dev.parent, "Failed to register event set\n"); goto error_free_sysfs; } if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) iio_device_register_trigger_consumer(indio_dev); if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) && indio_dev->setup_ops == NULL) indio_dev->setup_ops = &noop_ring_setup_ops; if (iio_dev_opaque->attached_buffers_cnt) cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops); else if (iio_dev_opaque->event_interface) cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops); if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) { indio_dev->dev.devt = 
MKDEV(MAJOR(iio_devt), iio_dev_opaque->id); iio_dev_opaque->chrdev.owner = this_mod; } /* assign device groups now; they should be all registered now */ indio_dev->dev.groups = iio_dev_opaque->groups; ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev); if (ret < 0) goto error_unreg_eventset; return 0; error_unreg_eventset: iio_device_unregister_eventset(indio_dev); error_free_sysfs: iio_device_unregister_sysfs(indio_dev); error_buffer_free_sysfs: iio_buffers_free_sysfs_and_mask(indio_dev); error_unreg_debugfs: iio_device_unregister_debugfs(indio_dev); return ret; } EXPORT_SYMBOL(__iio_device_register); /** * iio_device_unregister() - unregister a device from the IIO subsystem * @indio_dev: Device structure representing the device. */ void iio_device_unregister(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev); scoped_guard(mutex, &iio_dev_opaque->info_exist_lock) { iio_device_unregister_debugfs(indio_dev); iio_disable_all_buffers(indio_dev); indio_dev->info = NULL; iio_device_wakeup_eventset(indio_dev); iio_buffer_wakeup_poll(indio_dev); } iio_buffers_free_sysfs_and_mask(indio_dev); } EXPORT_SYMBOL(iio_device_unregister); static void devm_iio_device_unreg(void *indio_dev) { iio_device_unregister(indio_dev); } int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod) { int ret; ret = __iio_device_register(indio_dev, this_mod); if (ret) return ret; return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev); } EXPORT_SYMBOL_GPL(__devm_iio_device_register); /** * __iio_device_claim_direct - Keep device in direct mode * @indio_dev: the iio_dev associated with the device * * If the device is in direct mode it is guaranteed to stay * that way until __iio_device_release_direct() is called. * * Use with __iio_device_release_direct(). * * Drivers should only call iio_device_claim_direct(). * * Returns: true on success, false on failure. */ bool __iio_device_claim_direct(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); mutex_lock(&iio_dev_opaque->mlock); if (iio_buffer_enabled(indio_dev)) { mutex_unlock(&iio_dev_opaque->mlock); return false; } return true; } EXPORT_SYMBOL_GPL(__iio_device_claim_direct); /** * __iio_device_release_direct - releases claim on direct mode * @indio_dev: the iio_dev associated with the device * * Release the claim. Device is no longer guaranteed to stay * in direct mode. * * Drivers should only call iio_device_release_direct(). * * Use with __iio_device_claim_direct() */ void __iio_device_release_direct(struct iio_dev *indio_dev) { mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); } EXPORT_SYMBOL_GPL(__iio_device_release_direct); /** * iio_device_claim_buffer_mode - Keep device in buffer mode * @indio_dev: the iio_dev associated with the device * * If the device is in buffer mode it is guaranteed to stay * that way until iio_device_release_buffer_mode() is called. * * Use with iio_device_release_buffer_mode(). * * Returns: 0 on success, -EBUSY on failure. 
*/ int iio_device_claim_buffer_mode(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); mutex_lock(&iio_dev_opaque->mlock); if (iio_buffer_enabled(indio_dev)) return 0; mutex_unlock(&iio_dev_opaque->mlock); return -EBUSY; } EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode); /** * iio_device_release_buffer_mode - releases claim on buffer mode * @indio_dev: the iio_dev associated with the device * * Release the claim. Device is no longer guaranteed to stay * in buffer mode. * * Use with iio_device_claim_buffer_mode(). */ void iio_device_release_buffer_mode(struct iio_dev *indio_dev) { mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); } EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode); /** * iio_device_get_current_mode() - helper function providing read-only access to * the opaque @currentmode variable * @indio_dev: IIO device structure for device */ int iio_device_get_current_mode(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); return iio_dev_opaque->currentmode; } EXPORT_SYMBOL_GPL(iio_device_get_current_mode); subsys_initcall(iio_init); module_exit(iio_exit); MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); MODULE_DESCRIPTION("Industrial I/O core"); MODULE_LICENSE("GPL"); |
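Taken together, the helpers above (devm allocation, chrdev plumbing, registration, and the direct-mode claim pair) are the surface a typical sensor driver programs against. The sketch below is a minimal illustration of that usage, not a real driver: the platform probe, the "demo-adc" name, the single voltage channel, and the fake latched value are all assumptions, and the claim/release calls use the public wrappers that the kerneldoc above says drivers should call.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/iio/iio.h>

struct demo_adc {
	int last_raw;	/* pretend hardware latch; purely illustrative */
};

static const struct iio_chan_spec demo_adc_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	},
};

static int demo_adc_read_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int *val, int *val2, long mask)
{
	struct demo_adc *st = iio_priv(indio_dev);

	if (mask != IIO_CHAN_INFO_RAW)
		return -EINVAL;

	/* Keep the device in direct mode while reading the "hardware". */
	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;
	*val = st->last_raw;
	iio_device_release_direct(indio_dev);

	return IIO_VAL_INT;
}

static const struct iio_info demo_adc_info = {
	.read_raw = demo_adc_read_raw,
};

static int demo_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;

	/* Freed automatically on driver detach, as implemented above. */
	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct demo_adc));
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->name = "demo-adc";		/* assumed name */
	indio_dev->info = &demo_adc_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = demo_adc_channels;
	indio_dev->num_channels = ARRAY_SIZE(demo_adc_channels);

	/* Unregistered automatically via devm_iio_device_unreg() above. */
	return devm_iio_device_register(&pdev->dev, indio_dev);
}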
| 20 24 4 3 3 3 2 1 1 4 4 24 24 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 | /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/usb.h> #include <linux/usb/hcd.h> #include "usb.h" static int uas_is_interface(struct usb_host_interface *intf) { return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE && intf->desc.bInterfaceSubClass == USB_SC_SCSI && intf->desc.bInterfaceProtocol == USB_PR_UAS); } static struct usb_host_interface *uas_find_uas_alt_setting( struct usb_interface *intf) { int i; for (i = 0; i < intf->num_altsetting; i++) { struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) return alt; } return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, struct usb_host_endpoint *eps[]) { struct usb_host_endpoint *endpoint = alt->endpoint; unsigned i, n_endpoints = alt->desc.bNumEndpoints; for (i = 0; i < n_endpoints; i++) { unsigned char *extra = endpoint[i].extra; int len = endpoint[i].extralen; while (len >= 3) { if (extra[1] == USB_DT_PIPE_USAGE) { unsigned pipe_id = extra[2]; if (pipe_id > 0 && pipe_id < 5) eps[pipe_id - 1] = &endpoint[i]; break; } len -= extra[0]; extra += extra[0]; } } if (!eps[0] || !eps[1] || !eps[2] || !eps[3]) return -ENODEV; return 0; } static int uas_use_uas_driver(struct usb_interface *intf, const struct usb_device_id *id, u64 *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); u64 flags = id->driver_info; struct usb_host_interface *alt; int r; alt = uas_find_uas_alt_setting(intf); if (!alt) return 0; r = uas_find_endpoints(alt, eps); if (r < 0) return 0; /* * ASMedia has a number of usb3 to sata bridge chips, at the time of * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the * entire line, so the device-id is useless to determine if we're * dealing with an ASM1051 (which we want to avoid). * * The ASM1153 can be identified by config.MaxPower == 0, * where as the ASM105x models have config.MaxPower == 36. * * Differentiating between the ASM1053 and ASM1051 is trickier, when * connected over USB-3 we can look at the number of streams supported, * ASM1051 supports 32 streams, where as early ASM1053 versions support * 16 streams, newer ASM1053-s also support 32 streams, but have a * different prod-id. 
* * (*) ASM1051 chips do work with UAS with some disks (with the * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { if (udev->actconfig->desc.bMaxPower == 0) { /* ASM1153, do nothing */ } else if (udev->speed < USB_SPEED_SUPER) { /* No streams info, assume ASM1051 */ flags |= US_FL_IGNORE_UAS; } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; } else { /* ASM1053, these have issues with large transfers */ flags |= US_FL_MAX_SECTORS_240; } } /* All Seagate disk enclosures have broken ATA pass-through support */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) flags |= US_FL_NO_ATA_1X; /* * RTL9210-based enclosure from HIKSEMI, MD202 reportedly have issues * with UAS. This isn't distinguishable with just idVendor and * idProduct, use manufacturer and product too. * * Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda && le16_to_cpu(udev->descriptor.idProduct) == 0x9210 && (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) && (udev->product && !strcmp(udev->product, "MD202"))) flags |= US_FL_IGNORE_UAS; usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { dev_warn(&udev->dev, "UAS is ignored for this device, using usb-storage instead\n"); return 0; } if (udev->bus->sg_tablesize == 0) { dev_warn(&udev->dev, "The driver for the USB controller %s does not support scatter-gather which is\n", hcd->driver->description); dev_warn(&udev->dev, "required by the UAS driver. Please try an other USB controller if you wish to use UAS.\n"); return 0; } if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams) { dev_warn(&udev->dev, "USB controller %s does not support streams, which are required by the UAS driver.\n", hcd_to_bus(hcd)->bus_name); dev_warn(&udev->dev, "Please try an other USB controller if you wish to use UAS.\n"); return 0; } if (flags_ret) *flags_ret = flags; return 1; } |
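The endpoint matching above hinges on walking the class-specific descriptors that follow each endpoint descriptor and picking out the UAS pipe-usage descriptor (bDescriptorType 0x24) to learn which endpoint serves which pipe. The standalone sketch below replays that walk on a made-up descriptor blob; every byte value in it is invented for illustration and not taken from real hardware.

#include <stdio.h>

#define USB_DT_PIPE_USAGE 0x24

/*
 * Walk a block of class-specific descriptors trailing an endpoint
 * descriptor: each entry begins with its own length byte, so advancing
 * by extra[0] steps from one descriptor to the next, exactly as
 * uas_find_endpoints() does above.
 */
static int find_pipe_id(const unsigned char *extra, int len)
{
	while (len >= 3) {
		if (extra[1] == USB_DT_PIPE_USAGE)
			return extra[2];	/* bPipeID, 1..4 for UAS */
		len -= extra[0];
		extra += extra[0];
	}
	return -1;
}

int main(void)
{
	/* Invented example: one unrelated descriptor, then a pipe-usage one. */
	const unsigned char extra[] = {
		0x05, 0x99, 0x00, 0x00, 0x00,		/* filler descriptor (made up) */
		0x04, USB_DT_PIPE_USAGE, 0x02, 0x00,	/* pipe id 2 -> eps[1] above */
	};

	printf("pipe id: %d\n", find_pipe_id(extra, sizeof(extra)));
	return 0;
}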
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPCA506 chip based cameras function * M Xhaard 15/04/2004, based on earlier work by Mark Taylor and others * and on my own snoop log from a pv-321c donated by a German company, * "Firma Frank Gmbh", from Saarbruecken * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> */ #define MODULE_NAME "spca506" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA506 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPCA506 chip based cameras function * M Xhaard 15/04/2004, based on earlier work by Mark Taylor and others * and on my own snoop log from a pv-321c donated by a German company, * "Firma Frank Gmbh", from Saarbruecken * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> */ #define MODULE_NAME "spca506" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA506 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ char norme; char channel; }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 5}, {176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 4}, {320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; #define SPCA50X_OFFSET_DATA 10 #define SAA7113_bright 0x0a /* defaults 0x80 */ #define SAA7113_contrast 0x0b /* defaults 0x47 */ #define SAA7113_saturation 0x0c /* defaults 0x40 */ #define SAA7113_hue 0x0d /* defaults 0x00 */ #define SAA7113_I2C_BASE_WRITE 0x4a /* read 'len' bytes to gspca_dev->usb_buf */ static void reg_r(struct gspca_dev *gspca_dev, __u16 req, __u16 index, __u16 length) { usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, length, 500); } static void reg_w(struct usb_device *dev, __u16 req, __u16 value, __u16 index) { usb_control_msg(dev, usb_sndctrlpipe(dev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); } static void spca506_Initi2c(struct gspca_dev *gspca_dev) { reg_w(gspca_dev->dev, 0x07, SAA7113_I2C_BASE_WRITE, 0x0004); } static void spca506_WriteI2c(struct gspca_dev *gspca_dev, __u16 valeur, __u16 reg) { int retry = 60; reg_w(gspca_dev->dev, 0x07, reg, 0x0001); reg_w(gspca_dev->dev, 0x07, valeur, 0x0000); while (retry--) { reg_r(gspca_dev, 0x07, 0x0003, 2); if ((gspca_dev->usb_buf[0] | gspca_dev->usb_buf[1]) == 0x00) break; } } static void spca506_SetNormeInput(struct gspca_dev *gspca_dev, __u16 norme, __u16 channel) { struct sd *sd = (struct sd *) gspca_dev; /* fixme: check if channel == 0..3 and 6..9 (8 values) */ __u8 setbit0 = 0x00; __u8 setbit1 = 0x00; __u8 videomask = 0x00; gspca_dbg(gspca_dev, D_STREAM, "** Open Set Norme **\n"); spca506_Initi2c(gspca_dev); /* NTSC bit0 -> 1(525 l) PAL SECAM bit0 -> 0 (625 l) */ /* Composite channel bit1 -> 1 S-video bit 1 -> 0 */ /* and exclude SAA7113 reserved channel set default 0 otherwise */ if (norme & V4L2_STD_NTSC) setbit0 = 0x01; if (channel == 4 || channel == 5 || channel > 9) channel = 0; if (channel < 4) setbit1 = 0x02; videomask = (0x48 | setbit0 | setbit1); reg_w(gspca_dev->dev, 0x08, videomask, 0x0000); spca506_WriteI2c(gspca_dev, (0xc0 | (channel & 0x0F)), 0x02); if (norme & V4L2_STD_NTSC) spca506_WriteI2c(gspca_dev, 0x33, 0x0e); /* Chrominance Control NTSC N */ else if (norme & V4L2_STD_SECAM) spca506_WriteI2c(gspca_dev, 0x53, 0x0e); /* Chrominance Control SECAM */ else spca506_WriteI2c(gspca_dev, 0x03, 0x0e); /* Chrominance Control PAL BGHIV */ sd->norme = norme; sd->channel = channel; gspca_dbg(gspca_dev, D_STREAM, "Set Video Byte to 0x%2x\n", videomask); gspca_dbg(gspca_dev, D_STREAM, "Set Norme: %08x Channel %d", norme, channel); } static void spca506_GetNormeInput(struct gspca_dev *gspca_dev, __u16 *norme, __u16 *channel) { struct sd *sd = (struct sd *) gspca_dev; /* Read the register is not so good value 
change so we use your own copy in spca50x struct */ *norme = sd->norme; *channel = sd->channel; gspca_dbg(gspca_dev, D_STREAM, "Get Norme: %d Channel %d\n", *norme, *channel); } static void spca506_Setsize(struct gspca_dev *gspca_dev, __u16 code, __u16 xmult, __u16 ymult) { struct usb_device *dev = gspca_dev->dev; gspca_dbg(gspca_dev, D_STREAM, "** SetSize **\n"); reg_w(dev, 0x04, (0x18 | (code & 0x07)), 0x0000); /* Soft snap 0x40 Hard 0x41 */ reg_w(dev, 0x04, 0x41, 0x0001); reg_w(dev, 0x04, 0x00, 0x0002); /* reserved */ reg_w(dev, 0x04, 0x00, 0x0003); /* reserved */ reg_w(dev, 0x04, 0x00, 0x0004); /* reserved */ reg_w(dev, 0x04, 0x01, 0x0005); /* reserced */ reg_w(dev, 0x04, xmult, 0x0006); /* reserved */ reg_w(dev, 0x04, ymult, 0x0007); /* compression 1 */ reg_w(dev, 0x04, 0x00, 0x0008); /* T=64 -> 2 */ reg_w(dev, 0x04, 0x00, 0x0009); /* threshold2D */ reg_w(dev, 0x04, 0x21, 0x000a); /* quantization */ reg_w(dev, 0x04, 0x00, 0x000b); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; reg_w(dev, 0x03, 0x00, 0x0004); reg_w(dev, 0x03, 0xFF, 0x0003); reg_w(dev, 0x03, 0x00, 0x0000); reg_w(dev, 0x03, 0x1c, 0x0001); reg_w(dev, 0x03, 0x18, 0x0001); /* Init on PAL and composite input0 */ spca506_SetNormeInput(gspca_dev, 0, 0); reg_w(dev, 0x03, 0x1c, 0x0001); reg_w(dev, 0x03, 0x18, 0x0001); reg_w(dev, 0x05, 0x00, 0x0000); reg_w(dev, 0x05, 0xef, 0x0001); reg_w(dev, 0x05, 0x00, 0x00c1); reg_w(dev, 0x05, 0x00, 0x00c2); reg_w(dev, 0x06, 0x18, 0x0002); reg_w(dev, 0x06, 0xf5, 0x0011); reg_w(dev, 0x06, 0x02, 0x0012); reg_w(dev, 0x06, 0xfb, 0x0013); reg_w(dev, 0x06, 0x00, 0x0014); reg_w(dev, 0x06, 0xa4, 0x0051); reg_w(dev, 0x06, 0x40, 0x0052); reg_w(dev, 0x06, 0x71, 0x0053); reg_w(dev, 0x06, 0x40, 0x0054); /************************************************/ reg_w(dev, 0x03, 0x00, 0x0004); reg_w(dev, 0x03, 0x00, 0x0003); reg_w(dev, 0x03, 0x00, 0x0004); reg_w(dev, 0x03, 0xFF, 0x0003); reg_w(dev, 0x02, 0x00, 0x0000); reg_w(dev, 0x03, 0x60, 0x0000); reg_w(dev, 0x03, 0x18, 0x0001); /* for a better reading mx :) */ /*sdca506_WriteI2c(value,register) */ spca506_Initi2c(gspca_dev); spca506_WriteI2c(gspca_dev, 0x08, 0x01); spca506_WriteI2c(gspca_dev, 0xc0, 0x02); /* input composite video */ spca506_WriteI2c(gspca_dev, 0x33, 0x03); spca506_WriteI2c(gspca_dev, 0x00, 0x04); spca506_WriteI2c(gspca_dev, 0x00, 0x05); spca506_WriteI2c(gspca_dev, 0x0d, 0x06); spca506_WriteI2c(gspca_dev, 0xf0, 0x07); spca506_WriteI2c(gspca_dev, 0x98, 0x08); spca506_WriteI2c(gspca_dev, 0x03, 0x09); spca506_WriteI2c(gspca_dev, 0x80, 0x0a); spca506_WriteI2c(gspca_dev, 0x47, 0x0b); spca506_WriteI2c(gspca_dev, 0x48, 0x0c); spca506_WriteI2c(gspca_dev, 0x00, 0x0d); spca506_WriteI2c(gspca_dev, 0x03, 0x0e); /* Chroma Pal adjust */ spca506_WriteI2c(gspca_dev, 0x2a, 0x0f); spca506_WriteI2c(gspca_dev, 0x00, 0x10); spca506_WriteI2c(gspca_dev, 0x0c, 0x11); spca506_WriteI2c(gspca_dev, 0xb8, 0x12); spca506_WriteI2c(gspca_dev, 0x01, 0x13); spca506_WriteI2c(gspca_dev, 0x00, 0x14); spca506_WriteI2c(gspca_dev, 0x00, 0x15); spca506_WriteI2c(gspca_dev, 0x00, 0x16); spca506_WriteI2c(gspca_dev, 0x00, 0x17); spca506_WriteI2c(gspca_dev, 0x00, 0x18); spca506_WriteI2c(gspca_dev, 0x00, 0x19); spca506_WriteI2c(gspca_dev, 0x00, 0x1a); 
spca506_WriteI2c(gspca_dev, 0x00, 0x1b); spca506_WriteI2c(gspca_dev, 0x00, 0x1c); spca506_WriteI2c(gspca_dev, 0x00, 0x1d); spca506_WriteI2c(gspca_dev, 0x00, 0x1e); spca506_WriteI2c(gspca_dev, 0xa1, 0x1f); spca506_WriteI2c(gspca_dev, 0x02, 0x40); spca506_WriteI2c(gspca_dev, 0xff, 0x41); spca506_WriteI2c(gspca_dev, 0xff, 0x42); spca506_WriteI2c(gspca_dev, 0xff, 0x43); spca506_WriteI2c(gspca_dev, 0xff, 0x44); spca506_WriteI2c(gspca_dev, 0xff, 0x45); spca506_WriteI2c(gspca_dev, 0xff, 0x46); spca506_WriteI2c(gspca_dev, 0xff, 0x47); spca506_WriteI2c(gspca_dev, 0xff, 0x48); spca506_WriteI2c(gspca_dev, 0xff, 0x49); spca506_WriteI2c(gspca_dev, 0xff, 0x4a); spca506_WriteI2c(gspca_dev, 0xff, 0x4b); spca506_WriteI2c(gspca_dev, 0xff, 0x4c); spca506_WriteI2c(gspca_dev, 0xff, 0x4d); spca506_WriteI2c(gspca_dev, 0xff, 0x4e); spca506_WriteI2c(gspca_dev, 0xff, 0x4f); spca506_WriteI2c(gspca_dev, 0xff, 0x50); spca506_WriteI2c(gspca_dev, 0xff, 0x51); spca506_WriteI2c(gspca_dev, 0xff, 0x52); spca506_WriteI2c(gspca_dev, 0xff, 0x53); spca506_WriteI2c(gspca_dev, 0xff, 0x54); spca506_WriteI2c(gspca_dev, 0xff, 0x55); spca506_WriteI2c(gspca_dev, 0xff, 0x56); spca506_WriteI2c(gspca_dev, 0xff, 0x57); spca506_WriteI2c(gspca_dev, 0x00, 0x58); spca506_WriteI2c(gspca_dev, 0x54, 0x59); spca506_WriteI2c(gspca_dev, 0x07, 0x5a); spca506_WriteI2c(gspca_dev, 0x83, 0x5b); spca506_WriteI2c(gspca_dev, 0x00, 0x5c); spca506_WriteI2c(gspca_dev, 0x00, 0x5d); spca506_WriteI2c(gspca_dev, 0x00, 0x5e); spca506_WriteI2c(gspca_dev, 0x00, 0x5f); spca506_WriteI2c(gspca_dev, 0x00, 0x60); spca506_WriteI2c(gspca_dev, 0x05, 0x61); spca506_WriteI2c(gspca_dev, 0x9f, 0x62); gspca_dbg(gspca_dev, D_STREAM, "** Close Init *\n"); return 0; } static int sd_start(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; __u16 norme; __u16 channel; /**************************************/ reg_w(dev, 0x03, 0x00, 0x0004); reg_w(dev, 0x03, 0x00, 0x0003); reg_w(dev, 0x03, 0x00, 0x0004); reg_w(dev, 0x03, 0xFF, 0x0003); reg_w(dev, 0x02, 0x00, 0x0000); reg_w(dev, 0x03, 0x60, 0x0000); reg_w(dev, 0x03, 0x18, 0x0001); /*sdca506_WriteI2c(value,register) */ spca506_Initi2c(gspca_dev); spca506_WriteI2c(gspca_dev, 0x08, 0x01); /* Increment Delay */ /* spca506_WriteI2c(gspca_dev, 0xc0, 0x02); * Analog Input Control 1 */ spca506_WriteI2c(gspca_dev, 0x33, 0x03); /* Analog Input Control 2 */ spca506_WriteI2c(gspca_dev, 0x00, 0x04); /* Analog Input Control 3 */ spca506_WriteI2c(gspca_dev, 0x00, 0x05); /* Analog Input Control 4 */ spca506_WriteI2c(gspca_dev, 0x0d, 0x06); /* Horizontal Sync Start 0xe9-0x0d */ spca506_WriteI2c(gspca_dev, 0xf0, 0x07); /* Horizontal Sync Stop 0x0d-0xf0 */ spca506_WriteI2c(gspca_dev, 0x98, 0x08); /* Sync Control */ /* Defaults value */ spca506_WriteI2c(gspca_dev, 0x03, 0x09); /* Luminance Control */ spca506_WriteI2c(gspca_dev, 0x80, 0x0a); /* Luminance Brightness */ spca506_WriteI2c(gspca_dev, 0x47, 0x0b); /* Luminance Contrast */ spca506_WriteI2c(gspca_dev, 0x48, 0x0c); /* Chrominance Saturation */ spca506_WriteI2c(gspca_dev, 0x00, 0x0d); /* Chrominance Hue Control */ spca506_WriteI2c(gspca_dev, 0x2a, 0x0f); /* Chrominance Gain Control */ /**************************************/ spca506_WriteI2c(gspca_dev, 0x00, 0x10); /* Format/Delay Control */ spca506_WriteI2c(gspca_dev, 0x0c, 0x11); /* Output Control 1 */ spca506_WriteI2c(gspca_dev, 0xb8, 0x12); /* Output Control 2 */ spca506_WriteI2c(gspca_dev, 0x01, 0x13); /* Output Control 3 */ spca506_WriteI2c(gspca_dev, 0x00, 0x14); /* reserved */ spca506_WriteI2c(gspca_dev, 0x00, 0x15); /* 
VGATE START */ spca506_WriteI2c(gspca_dev, 0x00, 0x16); /* VGATE STOP */ spca506_WriteI2c(gspca_dev, 0x00, 0x17); /* VGATE Control (MSB) */ spca506_WriteI2c(gspca_dev, 0x00, 0x18); spca506_WriteI2c(gspca_dev, 0x00, 0x19); spca506_WriteI2c(gspca_dev, 0x00, 0x1a); spca506_WriteI2c(gspca_dev, 0x00, 0x1b); spca506_WriteI2c(gspca_dev, 0x00, 0x1c); spca506_WriteI2c(gspca_dev, 0x00, 0x1d); spca506_WriteI2c(gspca_dev, 0x00, 0x1e); spca506_WriteI2c(gspca_dev, 0xa1, 0x1f); spca506_WriteI2c(gspca_dev, 0x02, 0x40); spca506_WriteI2c(gspca_dev, 0xff, 0x41); spca506_WriteI2c(gspca_dev, 0xff, 0x42); spca506_WriteI2c(gspca_dev, 0xff, 0x43); spca506_WriteI2c(gspca_dev, 0xff, 0x44); spca506_WriteI2c(gspca_dev, 0xff, 0x45); spca506_WriteI2c(gspca_dev, 0xff, 0x46); spca506_WriteI2c(gspca_dev, 0xff, 0x47); spca506_WriteI2c(gspca_dev, 0xff, 0x48); spca506_WriteI2c(gspca_dev, 0xff, 0x49); spca506_WriteI2c(gspca_dev, 0xff, 0x4a); spca506_WriteI2c(gspca_dev, 0xff, 0x4b); spca506_WriteI2c(gspca_dev, 0xff, 0x4c); spca506_WriteI2c(gspca_dev, 0xff, 0x4d); spca506_WriteI2c(gspca_dev, 0xff, 0x4e); spca506_WriteI2c(gspca_dev, 0xff, 0x4f); spca506_WriteI2c(gspca_dev, 0xff, 0x50); spca506_WriteI2c(gspca_dev, 0xff, 0x51); spca506_WriteI2c(gspca_dev, 0xff, 0x52); spca506_WriteI2c(gspca_dev, 0xff, 0x53); spca506_WriteI2c(gspca_dev, 0xff, 0x54); spca506_WriteI2c(gspca_dev, 0xff, 0x55); spca506_WriteI2c(gspca_dev, 0xff, 0x56); spca506_WriteI2c(gspca_dev, 0xff, 0x57); spca506_WriteI2c(gspca_dev, 0x00, 0x58); spca506_WriteI2c(gspca_dev, 0x54, 0x59); spca506_WriteI2c(gspca_dev, 0x07, 0x5a); spca506_WriteI2c(gspca_dev, 0x83, 0x5b); spca506_WriteI2c(gspca_dev, 0x00, 0x5c); spca506_WriteI2c(gspca_dev, 0x00, 0x5d); spca506_WriteI2c(gspca_dev, 0x00, 0x5e); spca506_WriteI2c(gspca_dev, 0x00, 0x5f); spca506_WriteI2c(gspca_dev, 0x00, 0x60); spca506_WriteI2c(gspca_dev, 0x05, 0x61); spca506_WriteI2c(gspca_dev, 0x9f, 0x62); /**************************************/ reg_w(dev, 0x05, 0x00, 0x0003); reg_w(dev, 0x05, 0x00, 0x0004); reg_w(dev, 0x03, 0x10, 0x0001); reg_w(dev, 0x03, 0x78, 0x0000); switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { case 0: spca506_Setsize(gspca_dev, 0, 0x10, 0x10); break; case 1: spca506_Setsize(gspca_dev, 1, 0x1a, 0x1a); break; case 2: spca506_Setsize(gspca_dev, 2, 0x1c, 0x1c); break; case 4: spca506_Setsize(gspca_dev, 4, 0x34, 0x34); break; default: /* case 5: */ spca506_Setsize(gspca_dev, 5, 0x40, 0x40); break; } /* compress setting and size */ /* set i2c luma */ reg_w(dev, 0x02, 0x01, 0x0000); reg_w(dev, 0x03, 0x12, 0x0000); reg_r(gspca_dev, 0x04, 0x0001, 2); gspca_dbg(gspca_dev, D_STREAM, "webcam started\n"); spca506_GetNormeInput(gspca_dev, &norme, &channel); spca506_SetNormeInput(gspca_dev, norme, channel); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; reg_w(dev, 0x02, 0x00, 0x0000); reg_w(dev, 0x03, 0x00, 0x0004); reg_w(dev, 0x03, 0x00, 0x0003); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { switch (data[0]) { case 0: /* start of frame */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); data += SPCA50X_OFFSET_DATA; len -= SPCA50X_OFFSET_DATA; gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); break; case 0xff: /* drop */ /* gspca_dev->last_packet_type = DISCARD_PACKET; */ break; default: data += 1; len -= 1; gspca_frame_add(gspca_dev, INTER_PACKET, data, len); break; } } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { spca506_Initi2c(gspca_dev); 
spca506_WriteI2c(gspca_dev, val, SAA7113_bright); spca506_WriteI2c(gspca_dev, 0x01, 0x09); } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { spca506_Initi2c(gspca_dev); spca506_WriteI2c(gspca_dev, val, SAA7113_contrast); spca506_WriteI2c(gspca_dev, 0x01, 0x09); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { spca506_Initi2c(gspca_dev); spca506_WriteI2c(gspca_dev, val, SAA7113_saturation); spca506_WriteI2c(gspca_dev, 0x01, 0x09); } static void sethue(struct gspca_dev *gspca_dev, s32 val) { spca506_Initi2c(gspca_dev); spca506_WriteI2c(gspca_dev, val, SAA7113_hue); spca506_WriteI2c(gspca_dev, 0x01, 0x09); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_HUE: sethue(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 0x47); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 0x40); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, 0, 255, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06e1, 0xa190)}, /* {USB_DEVICE(0x0733, 0x0430)}, FIXME: may be IntelPCCameraPro BRIDGE_SPCA505 */ {USB_DEVICE(0x0734, 0x043b)}, {USB_DEVICE(0x99fa, 0x8988)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
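The SAA7113 registers behind setbrightness()/setcontrast()/setcolors()/sethue() are surfaced to userspace as the four standard V4L2 controls registered in sd_init_controls(). Below is a minimal, hedged userspace sketch of driving one of them; the /dev/video0 path is only an assumption about how the camera happens to enumerate on a given system.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_BRIGHTNESS,	/* maps to SAA7113_bright above */
		.value = 200,			/* range 0..255 per sd_init_controls() */
	};
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}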
| 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 | // SPDX-License-Identifier: GPL-2.0-or-later #include <linux/device.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/stddef.h> struct mdiobus_devres { struct mii_bus *mii; }; static void devm_mdiobus_free(struct device *dev, void *this) { struct mdiobus_devres *dr = this; mdiobus_free(dr->mii); } /** * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() * @dev: Device to allocate mii_bus for * @sizeof_priv: Space to allocate for private structure * * Managed mdiobus_alloc_size. mii_bus allocated with this function is * automatically freed on driver detach. * * RETURNS: * Pointer to allocated mii_bus on success, NULL on out-of-memory error. */ struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) { struct mdiobus_devres *dr; dr = devres_alloc(devm_mdiobus_free, sizeof(*dr), GFP_KERNEL); if (!dr) return NULL; dr->mii = mdiobus_alloc_size(sizeof_priv); if (!dr->mii) { devres_free(dr); return NULL; } devres_add(dev, dr); return dr->mii; } EXPORT_SYMBOL(devm_mdiobus_alloc_size); static void devm_mdiobus_unregister(struct device *dev, void *this) { struct mdiobus_devres *dr = this; mdiobus_unregister(dr->mii); } static int mdiobus_devres_match(struct device *dev, void *this, void *match_data) { struct mdiobus_devres *res = this; struct mii_bus *mii = match_data; return mii == res->mii; } /** * __devm_mdiobus_register - Resource-managed variant of mdiobus_register() * @dev: Device to register mii_bus for * @bus: MII bus structure to register * @owner: Owning module * * Returns 0 on success, negative error number on failure. */ int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, struct module *owner) { struct mdiobus_devres *dr; int ret; if (WARN_ON(!devres_find(dev, devm_mdiobus_free, mdiobus_devres_match, bus))) return -EINVAL; dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; ret = __mdiobus_register(bus, owner); if (ret) { devres_free(dr); return ret; } dr->mii = bus; devres_add(dev, dr); return 0; } EXPORT_SYMBOL(__devm_mdiobus_register); #if IS_ENABLED(CONFIG_OF_MDIO) /** * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register() * @dev: Device to register mii_bus for * @mdio: MII bus structure to register * @np: Device node to parse * @owner: Owning module */ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, struct device_node *np, struct module *owner) { struct mdiobus_devres *dr; int ret; if (WARN_ON(!devres_find(dev, devm_mdiobus_free, mdiobus_devres_match, mdio))) return -EINVAL; dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; ret = __of_mdiobus_register(mdio, np, owner); if (ret) { devres_free(dr); return ret; } dr->mii = mdio; devres_add(dev, dr); return 0; } EXPORT_SYMBOL(__devm_of_mdiobus_register); #endif /* CONFIG_OF_MDIO */ MODULE_DESCRIPTION("Network MDIO bus devres helpers"); MODULE_LICENSE("GPL"); |
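For context on how the devres plumbing above is consumed, here is a hedged sketch of a typical MDIO controller driver: allocate with devm_mdiobus_alloc_size() and register with the devm_mdiobus_register() macro, which expands to __devm_mdiobus_register() with THIS_MODULE. The bus name, ID string, and the two register accessors are illustrative placeholders, not part of the devres code above.

#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

struct demo_mdio {
	void __iomem *regs;	/* illustrative; unused in this sketch */
};

static int demo_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	/* A real driver would access the controller registers here. */
	return 0xffff;
}

static int demo_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;
}

static int demo_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	/* Freed automatically through devm_mdiobus_free() above. */
	bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct demo_mdio));
	if (!bus)
		return -ENOMEM;

	bus->name = "demo-mdio";
	bus->read = demo_mdio_read;
	bus->write = demo_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

	/* Unregistered automatically through devm_mdiobus_unregister() above. */
	return devm_mdiobus_register(&pdev->dev, bus);
}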
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMAP_LOCK_H #define _LINUX_MMAP_LOCK_H /* Avoid a dependency loop by declaring here.
*/ extern int rcuwait_wake_up(struct rcuwait *w); #include <linux/lockdep.h> #include <linux/mm_types.h> #include <linux/mmdebug.h> #include <linux/rwsem.h> #include <linux/tracepoint-defs.h> #include <linux/types.h> #include <linux/cleanup.h> #include <linux/sched/mm.h> #define MMAP_LOCK_INITIALIZER(name) \ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), DECLARE_TRACEPOINT(mmap_lock_start_locking); DECLARE_TRACEPOINT(mmap_lock_acquire_returned); DECLARE_TRACEPOINT(mmap_lock_released); #ifdef CONFIG_TRACING void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write); void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write, bool success); void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write); static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, bool write) { if (tracepoint_enabled(mmap_lock_start_locking)) __mmap_lock_do_trace_start_locking(mm, write); } static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, bool write, bool success) { if (tracepoint_enabled(mmap_lock_acquire_returned)) __mmap_lock_do_trace_acquire_returned(mm, write, success); } static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) { if (tracepoint_enabled(mmap_lock_released)) __mmap_lock_do_trace_released(mm, write); } #else /* !CONFIG_TRACING */ static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, bool write) { } static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, bool write, bool success) { } static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) { } #endif /* CONFIG_TRACING */ static inline void mmap_assert_locked(const struct mm_struct *mm) { rwsem_assert_held(&mm->mmap_lock); } static inline void mmap_assert_write_locked(const struct mm_struct *mm) { rwsem_assert_held_write(&mm->mmap_lock); } #ifdef CONFIG_PER_VMA_LOCK static inline void mm_lock_seqcount_init(struct mm_struct *mm) { seqcount_init(&mm->mm_lock_seq); } static inline void mm_lock_seqcount_begin(struct mm_struct *mm) { do_raw_write_seqcount_begin(&mm->mm_lock_seq); } static inline void mm_lock_seqcount_end(struct mm_struct *mm) { ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq); do_raw_write_seqcount_end(&mm->mm_lock_seq); } static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq) { /* * Since mmap_lock is a sleeping lock, and waiting for it to become * unlocked is more or less equivalent with taking it ourselves, don't * bother with the speculative path if mmap_lock is already write-locked * and take the slow path, which takes the lock. */ return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq); } static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) { return read_seqcount_retry(&mm->mm_lock_seq, seq); } static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) { #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key lockdep_key; lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0); #endif if (reset_refcnt) refcount_set(&vma->vm_refcnt, 0); vma->vm_lock_seq = UINT_MAX; } static inline bool is_vma_writer_only(int refcnt) { /* * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on * a detached vma happens only in vma_mark_detached() and is a rare * case, therefore most of the time there will be no unnecessary wakeup. 
*/ return (refcnt & VMA_LOCK_OFFSET) && refcnt <= VMA_LOCK_OFFSET + 1; } static inline void vma_refcount_put(struct vm_area_struct *vma) { /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */ struct mm_struct *mm = vma->vm_mm; int oldcnt; rwsem_release(&vma->vmlock_dep_map, _RET_IP_); if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) { if (is_vma_writer_only(oldcnt - 1)) rcuwait_wake_up(&mm->vma_writer_wait); } } /* * Use only while holding mmap read lock which guarantees that locking will not * fail (nobody can concurrently write-lock the vma). vma_start_read() should * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock. */ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { int oldcnt; mmap_assert_locked(vma->vm_mm); if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, VMA_REF_LIMIT))) return false; rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); return true; } /* * Use only while holding mmap read lock which guarantees that locking will not * fail (nobody can concurrently write-lock the vma). vma_start_read() should * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock. */ static inline bool vma_start_read_locked(struct vm_area_struct *vma) { return vma_start_read_locked_nested(vma, 0); } static inline void vma_end_read(struct vm_area_struct *vma) { vma_refcount_put(vma); } /* WARNING! Can only be used if mmap_lock is expected to be write-locked */ static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); /* * current task is holding mmap_write_lock, both vma->vm_lock_seq and * mm->mm_lock_seq can't be concurrently modified. */ *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence; return (vma->vm_lock_seq == *mm_lock_seq); } int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq, int state); /* * Begin writing to a VMA. * Exclude concurrent readers under the per-VMA lock until the currently * write-locked mmap_lock is dropped or downgraded. */ static inline void vma_start_write(struct vm_area_struct *vma) { unsigned int mm_lock_seq; if (__is_vma_write_locked(vma, &mm_lock_seq)) return; __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE); } /** * vma_start_write_killable - Begin writing to a VMA. * @vma: The VMA we are going to modify. * * Exclude concurrent readers under the per-VMA lock until the currently * write-locked mmap_lock is dropped or downgraded. * * Context: May sleep while waiting for readers to drop the vma read lock. * Caller must already hold the mmap_lock for write. * * Return: 0 for a successful acquisition. -EINTR if a fatal signal was * received. 
*/ static inline __must_check int vma_start_write_killable(struct vm_area_struct *vma) { unsigned int mm_lock_seq; if (__is_vma_write_locked(vma, &mm_lock_seq)) return 0; return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) { unsigned int mm_lock_seq; VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); } static inline void vma_assert_locked(struct vm_area_struct *vma) { unsigned int mm_lock_seq; VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 && !__is_vma_write_locked(vma, &mm_lock_seq), vma); } /* * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these * assertions should be made either under mmap_write_lock or when the object * has been isolated under mmap_write_lock, ensuring no competing writers. */ static inline void vma_assert_attached(struct vm_area_struct *vma) { WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); } static inline void vma_assert_detached(struct vm_area_struct *vma) { WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); } static inline void vma_mark_attached(struct vm_area_struct *vma) { vma_assert_write_locked(vma); vma_assert_detached(vma); refcount_set_release(&vma->vm_refcnt, 1); } void vma_mark_detached(struct vm_area_struct *vma); struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address); /* * Locks next vma pointed by the iterator. Confirms the locked vma has not * been modified and will retry under mmap_lock protection if modification * was detected. Should be called from read RCU section. * Returns either a valid locked VMA, NULL if no more VMAs or -EINTR if the * process was interrupted. */ struct vm_area_struct *lock_next_vma(struct mm_struct *mm, struct vma_iterator *iter, unsigned long address); #else /* CONFIG_PER_VMA_LOCK */ static inline void mm_lock_seqcount_init(struct mm_struct *mm) {} static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {} static inline void mm_lock_seqcount_end(struct mm_struct *mm) {} static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq) { return false; } static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) { return true; } static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline __must_check int vma_start_write_killable(struct vm_area_struct *vma) { return 0; } static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } static inline void vma_assert_attached(struct vm_area_struct *vma) {} static inline void vma_assert_detached(struct vm_area_struct *vma) {} static inline void vma_mark_attached(struct vm_area_struct *vma) {} static inline void vma_mark_detached(struct vm_area_struct *vma) {} static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address) { return NULL; } static inline void vma_assert_locked(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); } #endif /* CONFIG_PER_VMA_LOCK */ static inline void mmap_write_lock(struct mm_struct *mm) { __mmap_lock_trace_start_locking(mm, true); down_write(&mm->mmap_lock); mm_lock_seqcount_begin(mm); __mmap_lock_trace_acquire_returned(mm, true, true); } static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) { __mmap_lock_trace_start_locking(mm, true); down_write_nested(&mm->mmap_lock, 
subclass); mm_lock_seqcount_begin(mm); __mmap_lock_trace_acquire_returned(mm, true, true); } static inline int mmap_write_lock_killable(struct mm_struct *mm) { int ret; __mmap_lock_trace_start_locking(mm, true); ret = down_write_killable(&mm->mmap_lock); if (!ret) mm_lock_seqcount_begin(mm); __mmap_lock_trace_acquire_returned(mm, true, ret == 0); return ret; } /* * Drop all currently-held per-VMA locks. * This is called from the mmap_lock implementation directly before releasing * a write-locked mmap_lock (or downgrading it to read-locked). * This should normally NOT be called manually from other places. * If you want to call this manually anyway, keep in mind that this will release * *all* VMA write locks, including ones from further up the stack. */ static inline void vma_end_write_all(struct mm_struct *mm) { mmap_assert_write_locked(mm); mm_lock_seqcount_end(mm); } static inline void mmap_write_unlock(struct mm_struct *mm) { __mmap_lock_trace_released(mm, true); vma_end_write_all(mm); up_write(&mm->mmap_lock); } static inline void mmap_write_downgrade(struct mm_struct *mm) { __mmap_lock_trace_acquire_returned(mm, false, true); vma_end_write_all(mm); downgrade_write(&mm->mmap_lock); } static inline void mmap_read_lock(struct mm_struct *mm) { __mmap_lock_trace_start_locking(mm, false); down_read(&mm->mmap_lock); __mmap_lock_trace_acquire_returned(mm, false, true); } static inline int mmap_read_lock_killable(struct mm_struct *mm) { int ret; __mmap_lock_trace_start_locking(mm, false); ret = down_read_killable(&mm->mmap_lock); __mmap_lock_trace_acquire_returned(mm, false, ret == 0); return ret; } static inline bool mmap_read_trylock(struct mm_struct *mm) { bool ret; __mmap_lock_trace_start_locking(mm, false); ret = down_read_trylock(&mm->mmap_lock) != 0; __mmap_lock_trace_acquire_returned(mm, false, ret); return ret; } static inline void mmap_read_unlock(struct mm_struct *mm) { __mmap_lock_trace_released(mm, false); up_read(&mm->mmap_lock); } DEFINE_GUARD(mmap_read_lock, struct mm_struct *, mmap_read_lock(_T), mmap_read_unlock(_T)) static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { __mmap_lock_trace_released(mm, false); up_read_non_owner(&mm->mmap_lock); } static inline int mmap_lock_is_contended(struct mm_struct *mm) { return rwsem_is_contended(&mm->mmap_lock); } #endif /* _LINUX_MMAP_LOCK_H */ |
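To show how the pieces above combine, here is a hedged sketch of the usual lookup pattern: try the per-VMA read lock first via lock_vma_under_rcu(), and fall back to taking mmap_lock for reading only when that fails. vma_lookup() comes from <linux/mm.h>; the helper and its semantics are illustrative, not part of this header.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Return the vm_flags of the VMA covering @addr, or 0 if there is none. */
static vm_flags_t demo_vma_flags(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	vm_flags_t flags = 0;

	/* Fast path: per-VMA read lock, mmap_lock is never taken. */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		flags = vma->vm_flags;
		vma_end_read(vma);
		return flags;
	}

	/* Slow path: fall back to the whole-mm read lock. */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma)
		flags = vma->vm_flags;
	mmap_read_unlock(mm);

	return flags;
}

With the DEFINE_GUARD() declared above, the slow path could equally be written with guard(mmap_read_lock)(mm); so the unlock happens automatically at scope exit.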
| 2 2 2 2 2 2 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 | // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/smp.h> #include "blk.h" #include "blk-mq.h" static void blk_mq_sysfs_release(struct kobject *kobj) { struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj); free_percpu(ctxs->queue_ctx); kfree(ctxs); } static void blk_mq_ctx_sysfs_release(struct kobject *kobj) { struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj); /* ctx->ctxs won't be released until all ctx are freed */ kobject_put(&ctx->ctxs->kobj); } static void blk_mq_hw_sysfs_release(struct kobject *kobj) { struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); sbitmap_free(&hctx->ctx_map); free_cpumask_var(hctx->cpumask); kfree(hctx->ctxs); kfree(hctx); } struct blk_mq_hw_ctx_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_mq_hw_ctx *, char *); }; static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj, struct attribute *attr, char *page) { struct blk_mq_hw_ctx_sysfs_entry *entry; struct blk_mq_hw_ctx *hctx; struct request_queue *q; ssize_t res; entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); q = hctx->queue; if (!entry->show) return -EIO; mutex_lock(&q->elevator_lock); res = entry->show(hctx, page); mutex_unlock(&q->elevator_lock); return res; } static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, char *page) { return sprintf(page, "%u\n", hctx->tags->nr_tags); } static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, char *page) { return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags); } static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { const size_t size = PAGE_SIZE - 1; unsigned int i, first = 1; int ret = 0, pos = 0; for_each_cpu(i, hctx->cpumask) { if (first) ret = snprintf(pos + page, size - pos, "%u", i); else ret = snprintf(pos + page, size - pos, ", %u", i); if (ret >= size - pos) break; first = 0; pos += ret; } ret = snprintf(pos + page, size + 1 - pos, "\n"); return pos + ret; } static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = { .attr = {.name = "nr_tags", .mode = 0444 }, .show = blk_mq_hw_sysfs_nr_tags_show, }; 
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = { .attr = {.name = "nr_reserved_tags", .mode = 0444 }, .show = blk_mq_hw_sysfs_nr_reserved_tags_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = { .attr = {.name = "cpu_list", .mode = 0444 }, .show = blk_mq_hw_sysfs_cpus_show, }; static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_nr_tags.attr, &blk_mq_hw_sysfs_nr_reserved_tags.attr, &blk_mq_hw_sysfs_cpus.attr, NULL, }; ATTRIBUTE_GROUPS(default_hw_ctx); static const struct sysfs_ops blk_mq_hw_sysfs_ops = { .show = blk_mq_hw_sysfs_show, }; static const struct kobj_type blk_mq_ktype = { .release = blk_mq_sysfs_release, }; static const struct kobj_type blk_mq_ctx_ktype = { .release = blk_mq_ctx_sysfs_release, }; static const struct kobj_type blk_mq_hw_ktype = { .sysfs_ops = &blk_mq_hw_sysfs_ops, .default_groups = default_hw_ctx_groups, .release = blk_mq_hw_sysfs_release, }; static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) { struct blk_mq_ctx *ctx; int i; if (!hctx->nr_ctx) return; hctx_for_each_ctx(hctx, ctx, i) if (ctx->kobj.state_in_sysfs) kobject_del(&ctx->kobj); if (hctx->kobj.state_in_sysfs) kobject_del(&hctx->kobj); } static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; int i, j, ret; if (!hctx->nr_ctx) return 0; ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num); if (ret) return ret; hctx_for_each_ctx(hctx, ctx, i) { ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) goto out; } return 0; out: hctx_for_each_ctx(hctx, ctx, j) { if (j < i) kobject_del(&ctx->kobj); } kobject_del(&hctx->kobj); return ret; } void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx) { kobject_init(&hctx->kobj, &blk_mq_hw_ktype); } void blk_mq_sysfs_deinit(struct request_queue *q) { struct blk_mq_ctx *ctx; int cpu; for_each_possible_cpu(cpu) { ctx = per_cpu_ptr(q->queue_ctx, cpu); kobject_put(&ctx->kobj); } kobject_put(q->mq_kobj); } void blk_mq_sysfs_init(struct request_queue *q) { struct blk_mq_ctx *ctx; int cpu; kobject_init(q->mq_kobj, &blk_mq_ktype); for_each_possible_cpu(cpu) { ctx = per_cpu_ptr(q->queue_ctx, cpu); kobject_get(q->mq_kobj); kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); } } int blk_mq_sysfs_register(struct gendisk *disk) { struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; unsigned long i, j; int ret; ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq"); if (ret < 0) return ret; kobject_uevent(q->mq_kobj, KOBJ_ADD); mutex_lock(&q->tag_set->tag_list_lock); queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) goto out_unreg; } mutex_unlock(&q->tag_set->tag_list_lock); return 0; out_unreg: queue_for_each_hw_ctx(q, hctx, j) { if (j < i) blk_mq_unregister_hctx(hctx); } mutex_unlock(&q->tag_set->tag_list_lock); kobject_uevent(q->mq_kobj, KOBJ_REMOVE); kobject_del(q->mq_kobj); return ret; } void blk_mq_sysfs_unregister(struct gendisk *disk) { struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; unsigned long i; mutex_lock(&q->tag_set->tag_list_lock); queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); mutex_unlock(&q->tag_set->tag_list_lock); kobject_uevent(q->mq_kobj, KOBJ_REMOVE); kobject_del(q->mq_kobj); } void blk_mq_sysfs_unregister_hctxs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; if (!blk_queue_registered(q)) return; queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); } int 
blk_mq_sysfs_register_hctxs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; int ret = 0; if (!blk_queue_registered(q)) goto out; queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) break; } out: return ret; } |
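The file above exposes three read-only attributes per hardware queue (nr_tags, nr_reserved_tags and cpu_list) under the disk's "mq" directory in sysfs. As an illustration only, not part of the driver, here is a minimal user-space sketch that reads them for hardware queue 0; the disk name "sda" and the queue index are assumptions and will differ per system.

#include <stdio.h>

int main(void)
{
	/* Assumed paths: a block device named "sda" with at least one
	 * blk-mq hardware queue registered under /sys/block/sda/mq/0/. */
	const char *paths[] = {
		"/sys/block/sda/mq/0/nr_tags",
		"/sys/block/sda/mq/0/nr_reserved_tags",
		"/sys/block/sda/mq/0/cpu_list",
	};
	char line[256];

	for (int i = 0; i < 3; i++) {
		FILE *f = fopen(paths[i], "r");

		if (!f) {
			perror(paths[i]);
			continue;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", paths[i], line);
		fclose(f);
	}
	return 0;
}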
// SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> */ #include <linux/module.h> #include "mt76.h" #include "usb_trace.h" #include "dma.h" #define MT_VEND_REQ_MAX_RETRY 10 #define MT_VEND_REQ_TOUT_MS 300 static bool disable_usb_sg; module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644); MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support"); int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, void *buf, size_t len) { struct usb_interface *uintf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(uintf); unsigned int pipe; int i, ret; lockdep_assert_held(&dev->usb.usb_ctrl_mtx); pipe = (req_type & USB_DIR_IN) ?
usb_rcvctrlpipe(udev, 0) : usb_sndctrlpipe(udev, 0); for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { if (test_bit(MT76_REMOVED, &dev->phy.state)) return -EIO; ret = usb_control_msg(udev, pipe, req, req_type, val, offset, buf, len, MT_VEND_REQ_TOUT_MS); if (ret == -ENODEV || ret == -EPROTO) set_bit(MT76_REMOVED, &dev->phy.state); if (ret >= 0 || ret == -ENODEV || ret == -EPROTO) return ret; usleep_range(5000, 10000); } dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n", req, offset, ret); return ret; } EXPORT_SYMBOL_GPL(__mt76u_vendor_request); int mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, void *buf, size_t len) { int ret; mutex_lock(&dev->usb.usb_ctrl_mtx); ret = __mt76u_vendor_request(dev, req, req_type, val, offset, buf, len); trace_usb_reg_wr(dev, offset, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); return ret; } EXPORT_SYMBOL_GPL(mt76u_vendor_request); u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr) { struct mt76_usb *usb = &dev->usb; u32 data = ~0; int ret; ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16, addr, usb->data, sizeof(__le32)); if (ret == sizeof(__le32)) data = get_unaligned_le32(usb->data); trace_usb_reg_rr(dev, addr, data); return data; } EXPORT_SYMBOL_GPL(___mt76u_rr); static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr) { u8 req; switch (addr & MT_VEND_TYPE_MASK) { case MT_VEND_TYPE_EEPROM: req = MT_VEND_READ_EEPROM; break; case MT_VEND_TYPE_CFG: req = MT_VEND_READ_CFG; break; default: req = MT_VEND_MULTI_READ; break; } return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR, addr & ~MT_VEND_TYPE_MASK); } static u32 mt76u_rr(struct mt76_dev *dev, u32 addr) { u32 ret; mutex_lock(&dev->usb.usb_ctrl_mtx); ret = __mt76u_rr(dev, addr); mutex_unlock(&dev->usb.usb_ctrl_mtx); return ret; } void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr, u32 val) { struct mt76_usb *usb = &dev->usb; put_unaligned_le32(val, usb->data); __mt76u_vendor_request(dev, req, req_type, addr >> 16, addr, usb->data, sizeof(__le32)); trace_usb_reg_wr(dev, addr, val); } EXPORT_SYMBOL_GPL(___mt76u_wr); static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) { u8 req; switch (addr & MT_VEND_TYPE_MASK) { case MT_VEND_TYPE_CFG: req = MT_VEND_WRITE_CFG; break; default: req = MT_VEND_MULTI_WRITE; break; } ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, addr & ~MT_VEND_TYPE_MASK, val); } static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); __mt76u_wr(dev, addr, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); } static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); val |= __mt76u_rr(dev, addr) & ~mask; __mt76u_wr(dev, addr, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); return val; } static void mt76u_copy(struct mt76_dev *dev, u32 offset, const void *data, int len) { struct mt76_usb *usb = &dev->usb; const u8 *val = data; int ret; int current_batch_size; int i = 0; /* Assure that always a multiple of 4 bytes are copied, * otherwise beacons can be corrupted. 
* See: "mt76: round up length on mt76_wr_copy" * Commit 850e8f6fbd5d0003b0 */ len = round_up(len, 4); mutex_lock(&usb->usb_ctrl_mtx); while (i < len) { current_batch_size = min_t(int, usb->data_len, len - i); memcpy(usb->data, val + i, current_batch_size); ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE, USB_DIR_OUT | USB_TYPE_VENDOR, 0, offset + i, usb->data, current_batch_size); if (ret < 0) break; i += current_batch_size; } mutex_unlock(&usb->usb_ctrl_mtx); } void mt76u_read_copy(struct mt76_dev *dev, u32 offset, void *data, int len) { struct mt76_usb *usb = &dev->usb; int i = 0, batch_len, ret; u8 *val = data; len = round_up(len, 4); mutex_lock(&usb->usb_ctrl_mtx); while (i < len) { batch_len = min_t(int, usb->data_len, len - i); ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT, USB_DIR_IN | USB_TYPE_VENDOR, (offset + i) >> 16, offset + i, usb->data, batch_len); if (ret < 0) break; memcpy(val + i, usb->data, batch_len); i += batch_len; } mutex_unlock(&usb->usb_ctrl_mtx); } EXPORT_SYMBOL_GPL(mt76u_read_copy); void mt76u_single_wr(struct mt76_dev *dev, const u8 req, const u16 offset, const u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); __mt76u_vendor_request(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, val & 0xffff, offset, NULL, 0); __mt76u_vendor_request(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, val >> 16, offset + 2, NULL, 0); mutex_unlock(&dev->usb.usb_ctrl_mtx); } EXPORT_SYMBOL_GPL(mt76u_single_wr); static int mt76u_req_wr_rp(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *data, int len) { struct mt76_usb *usb = &dev->usb; mutex_lock(&usb->usb_ctrl_mtx); while (len > 0) { __mt76u_wr(dev, base + data->reg, data->value); len--; data++; } mutex_unlock(&usb->usb_ctrl_mtx); return 0; } static int mt76u_wr_rp(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *data, int n) { if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) return dev->mcu_ops->mcu_wr_rp(dev, base, data, n); else return mt76u_req_wr_rp(dev, base, data, n); } static int mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, int len) { struct mt76_usb *usb = &dev->usb; mutex_lock(&usb->usb_ctrl_mtx); while (len > 0) { data->value = __mt76u_rr(dev, base + data->reg); len--; data++; } mutex_unlock(&usb->usb_ctrl_mtx); return 0; } static int mt76u_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, int n) { if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) return dev->mcu_ops->mcu_rd_rp(dev, base, data, n); else return mt76u_req_rd_rp(dev, base, data, n); } static bool mt76u_check_sg(struct mt76_dev *dev) { struct usb_interface *uintf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(uintf); return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && udev->bus->no_sg_constraint); } static int mt76u_set_endpoints(struct usb_interface *intf, struct mt76_usb *usb) { struct usb_host_interface *intf_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; int i, in_ep = 0, out_ep = 0; for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc) && in_ep < __MT_EP_IN_MAX) { usb->in_ep[in_ep] = usb_endpoint_num(ep_desc); in_ep++; } else if (usb_endpoint_is_bulk_out(ep_desc) && out_ep < __MT_EP_OUT_MAX) { usb->out_ep[out_ep] = usb_endpoint_num(ep_desc); out_ep++; } } if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX) return -EINVAL; return 0; } static int mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, int nsgs) { int i; for 
(i = 0; i < nsgs; i++) { void *data; int offset; data = mt76_get_page_pool_buf(q, &offset, q->buf_size); if (!data) break; sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size, offset); } if (i < nsgs) { int j; for (j = nsgs; j < urb->num_sgs; j++) mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false); urb->num_sgs = i; } urb->num_sgs = max_t(int, i, urb->num_sgs); urb->transfer_buffer_length = urb->num_sgs * q->buf_size; sg_init_marker(urb->sg, urb->num_sgs); return i ? : -ENOMEM; } static int mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, int nsgs) { enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN]; int offset; if (qid == MT_RXQ_MAIN && dev->usb.sg_en) return mt76u_fill_rx_sg(dev, q, urb, nsgs); urb->transfer_buffer_length = q->buf_size; urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size); return urb->transfer_buffer ? 0 : -ENOMEM; } static int mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e, int sg_max_size) { unsigned int size = sizeof(struct urb); if (dev->usb.sg_en) size += sg_max_size * sizeof(struct scatterlist); e->urb = kzalloc(size, GFP_KERNEL); if (!e->urb) return -ENOMEM; usb_init_urb(e->urb); if (dev->usb.sg_en && sg_max_size > 0) e->urb->sg = (struct scatterlist *)(e->urb + 1); return 0; } static int mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_queue_entry *e) { enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN]; int err, sg_size; sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0; err = mt76u_urb_alloc(dev, e, sg_size); if (err) return err; return mt76u_refill_rx(dev, q, e->urb, sg_size); } static void mt76u_urb_free(struct urb *urb) { int i; for (i = 0; i < urb->num_sgs; i++) mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false); if (urb->transfer_buffer) mt76_put_page_pool_buf(urb->transfer_buffer, false); usb_free_urb(urb); } static void mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, struct urb *urb, usb_complete_t complete_fn, void *context) { struct usb_interface *uintf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(uintf); unsigned int pipe; if (dir == USB_DIR_IN) pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]); else pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]); urb->dev = udev; urb->pipe = pipe; urb->complete = complete_fn; urb->context = context; } static struct urb * mt76u_get_next_rx_entry(struct mt76_queue *q) { struct urb *urb = NULL; unsigned long flags; spin_lock_irqsave(&q->lock, flags); if (q->queued > 0) { urb = q->entry[q->tail].urb; q->tail = (q->tail + 1) % q->ndesc; q->queued--; } spin_unlock_irqrestore(&q->lock, flags); return urb; } static int mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data, u32 data_len) { u16 dma_len, min_len; dma_len = get_unaligned_le16(data); if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR) return dma_len; min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN; if (data_len < min_len || !dma_len || dma_len + MT_DMA_HDR_LEN > data_len || (dma_len & 0x3)) return -EINVAL; return dma_len; } static struct sk_buff * mt76u_build_rx_skb(struct mt76_dev *dev, void *data, int len, int buf_size) { int head_room, drv_flags = dev->drv->drv_flags; struct sk_buff *skb; head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 
0 : MT_DMA_HDR_LEN; if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) { struct page *page; /* slow path, not enough space for data and * skb_shared_info */ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC); if (!skb) return NULL; skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN); data += head_room + MT_SKB_HEAD_LEN; page = virt_to_head_page(data); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, data - page_address(page), len - MT_SKB_HEAD_LEN, buf_size); return skb; } /* fast path */ skb = build_skb(data, buf_size); if (!skb) return NULL; skb_reserve(skb, head_room); __skb_put(skb, len); return skb; } static int mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb, int buf_size) { u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer; int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length; int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags; struct sk_buff *skb; if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state)) return 0; len = mt76u_get_rx_entry_len(dev, data, urb->actual_length); if (len < 0) return 0; head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN; data_len = min_t(int, len, data_len - head_room); if (len == data_len && dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len)) return 0; skb = mt76u_build_rx_skb(dev, data, data_len, buf_size); if (!skb) return 0; len -= data_len; while (len > 0 && nsgs < urb->num_sgs) { data_len = min_t(int, len, urb->sg[nsgs].length); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, sg_page(&urb->sg[nsgs]), urb->sg[nsgs].offset, data_len, buf_size); len -= data_len; nsgs++; } skb_mark_for_recycle(skb); dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL); return nsgs; } static void mt76u_complete_rx(struct urb *urb) { struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev); struct mt76_queue *q = urb->context; unsigned long flags; trace_rx_urb(dev, urb); switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: case -ENOENT: case -EPROTO: return; default: dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", urb->status); fallthrough; case 0: break; } spin_lock_irqsave(&q->lock, flags); if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch")) goto out; q->head = (q->head + 1) % q->ndesc; q->queued++; mt76_worker_schedule(&dev->usb.rx_worker); out: spin_unlock_irqrestore(&q->lock, flags); } static int mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid, struct urb *urb) { int ep = qid == MT_RXQ_MAIN ? 
MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP; mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb, mt76u_complete_rx, &dev->q_rx[qid]); trace_submit_urb(dev, urb); return usb_submit_urb(urb, GFP_ATOMIC); } static void mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) { int qid = q - &dev->q_rx[MT_RXQ_MAIN]; struct urb *urb; int err, count; while (true) { urb = mt76u_get_next_rx_entry(q); if (!urb) break; count = mt76u_process_rx_entry(dev, urb, q->buf_size); if (count > 0) { err = mt76u_refill_rx(dev, q, urb, count); if (err < 0) break; } mt76u_submit_rx_buf(dev, qid, urb); } if (qid == MT_RXQ_MAIN) { local_bh_disable(); mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); local_bh_enable(); } } static void mt76u_rx_worker(struct mt76_worker *w) { struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker); struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb); int i; rcu_read_lock(); mt76_for_each_q_rx(dev, i) mt76u_process_rx_queue(dev, &dev->q_rx[i]); rcu_read_unlock(); } static int mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid) { struct mt76_queue *q = &dev->q_rx[qid]; unsigned long flags; int i, err = 0; spin_lock_irqsave(&q->lock, flags); for (i = 0; i < q->ndesc; i++) { err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb); if (err < 0) break; } q->head = q->tail = 0; q->queued = 0; spin_unlock_irqrestore(&q->lock, flags); return err; } static int mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) { struct mt76_queue *q = &dev->q_rx[qid]; int i, err; err = mt76_create_page_pool(dev, q); if (err) return err; spin_lock_init(&q->lock); q->entry = devm_kcalloc(dev->dev, MT_NUM_RX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; q->ndesc = MT_NUM_RX_ENTRIES; q->buf_size = PAGE_SIZE; for (i = 0; i < q->ndesc; i++) { err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]); if (err < 0) return err; } return mt76u_submit_rx_buffers(dev, qid); } int mt76u_alloc_mcu_queue(struct mt76_dev *dev) { return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU); } EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue); static void mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) { int i; for (i = 0; i < q->ndesc; i++) { if (!q->entry[i].urb) continue; mt76u_urb_free(q->entry[i].urb); q->entry[i].urb = NULL; } page_pool_destroy(q->page_pool); q->page_pool = NULL; } static void mt76u_free_rx(struct mt76_dev *dev) { int i; mt76_worker_teardown(&dev->usb.rx_worker); mt76_for_each_q_rx(dev, i) mt76u_free_rx_queue(dev, &dev->q_rx[i]); } void mt76u_stop_rx(struct mt76_dev *dev) { int i; mt76_worker_disable(&dev->usb.rx_worker); mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; int j; for (j = 0; j < q->ndesc; j++) usb_poison_urb(q->entry[j].urb); } } EXPORT_SYMBOL_GPL(mt76u_stop_rx); int mt76u_resume_rx(struct mt76_dev *dev) { int i; mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; int err, j; for (j = 0; j < q->ndesc; j++) usb_unpoison_urb(q->entry[j].urb); err = mt76u_submit_rx_buffers(dev, i); if (err < 0) return err; } mt76_worker_enable(&dev->usb.rx_worker); return 0; } EXPORT_SYMBOL_GPL(mt76u_resume_rx); static void mt76u_status_worker(struct mt76_worker *w) { struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker); struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb); struct mt76_queue_entry entry; struct mt76_queue *q; int i; if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state)) return; for (i = 0; i <= MT_TXQ_PSD; i++) { q = dev->phy.q_tx[i]; if (!q) continue; while (q->queued > 0) { if 
(!q->entry[q->tail].done) break; entry = q->entry[q->tail]; q->entry[q->tail].done = false; mt76_queue_tx_complete(dev, q, &entry); } if (!q->queued) wake_up(&dev->tx_wait); mt76_worker_schedule(&dev->tx_worker); } if (dev->drv->tx_status_data && !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) queue_work(dev->wq, &dev->usb.stat_work); } static void mt76u_tx_status_data(struct work_struct *work) { struct mt76_usb *usb; struct mt76_dev *dev; u8 update = 1; u16 count = 0; usb = container_of(work, struct mt76_usb, stat_work); dev = container_of(usb, struct mt76_dev, usb); while (true) { if (test_bit(MT76_REMOVED, &dev->phy.state)) break; if (!dev->drv->tx_status_data(dev, &update)) break; count++; } if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) queue_work(dev->wq, &usb->stat_work); else clear_bit(MT76_READING_STATS, &dev->phy.state); } static void mt76u_complete_tx(struct urb *urb) { struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev); struct mt76_queue_entry *e = urb->context; if (mt76u_urb_error(urb)) dev_err(dev->dev, "tx urb failed: %d\n", urb->status); e->done = true; mt76_worker_schedule(&dev->usb.status_worker); } static int mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, struct urb *urb) { urb->transfer_buffer_length = skb->len; if (!dev->usb.sg_en) { urb->transfer_buffer = skb->data; return 0; } sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE); urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); if (!urb->num_sgs) return -ENOMEM; return urb->num_sgs; } static int mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { struct mt76_tx_info tx_info = { .skb = skb, }; struct mt76_dev *dev = phy->dev; u16 idx = q->head; int err; if (q->queued == q->ndesc) return -ENOSPC; skb->prev = skb->next = NULL; err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); if (err < 0) return err; err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb); if (err < 0) return err; mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb, mt76u_complete_tx, &q->entry[idx]); q->head = (q->head + 1) % q->ndesc; q->entry[idx].skb = tx_info.skb; q->entry[idx].wcid = 0xffff; q->queued++; return idx; } static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) { struct urb *urb; int err; while (q->first != q->head) { urb = q->entry[q->first].urb; trace_submit_urb(dev, urb); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err == -ENODEV) set_bit(MT76_REMOVED, &dev->phy.state); else dev_err(dev->dev, "tx urb submit failed:%d\n", err); break; } q->first = (q->first + 1) % q->ndesc; } } static void mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid) { u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE; switch (mt76_chip(dev)) { case 0x7663: { static const u8 lmac_queue_map[] = { /* ac to lmac mapping */ [IEEE80211_AC_BK] = 0, [IEEE80211_AC_BE] = 1, [IEEE80211_AC_VI] = 2, [IEEE80211_AC_VO] = 4, }; q->hw_idx = lmac_queue_map[ac]; q->ep = q->hw_idx + 1; break; } case 0x7961: case 0x7925: q->hw_idx = mt76_ac_to_hwq(ac); q->ep = qid == MT_TXQ_PSD ? 
MT_EP_OUT_HCCA : q->hw_idx + 1; break; default: q->hw_idx = mt76_ac_to_hwq(ac); q->ep = q->hw_idx + 1; break; } } static int mt76u_alloc_tx(struct mt76_dev *dev) { int i; for (i = 0; i <= MT_TXQ_PSD; i++) { struct mt76_queue *q; int j, err; q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); if (!q) return -ENOMEM; spin_lock_init(&q->lock); mt76u_ac_to_hwq(dev, q, i); dev->phy.q_tx[i] = q; q->entry = devm_kcalloc(dev->dev, MT_NUM_TX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; q->ndesc = MT_NUM_TX_ENTRIES; for (j = 0; j < q->ndesc; j++) { err = mt76u_urb_alloc(dev, &q->entry[j], MT_TX_SG_MAX_SIZE); if (err < 0) return err; } } return 0; } static void mt76u_free_tx(struct mt76_dev *dev) { int i; mt76_worker_teardown(&dev->usb.status_worker); for (i = 0; i <= MT_TXQ_PSD; i++) { struct mt76_queue *q; int j; q = dev->phy.q_tx[i]; if (!q) continue; for (j = 0; j < q->ndesc; j++) { usb_free_urb(q->entry[j].urb); q->entry[j].urb = NULL; } } } void mt76u_stop_tx(struct mt76_dev *dev) { int ret; mt76_worker_disable(&dev->usb.status_worker); ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), HZ / 5); if (!ret) { struct mt76_queue_entry entry; struct mt76_queue *q; int i, j; dev_err(dev->dev, "timed out waiting for pending tx\n"); for (i = 0; i <= MT_TXQ_PSD; i++) { q = dev->phy.q_tx[i]; if (!q) continue; for (j = 0; j < q->ndesc; j++) usb_kill_urb(q->entry[j].urb); } mt76_worker_disable(&dev->tx_worker); /* On device removal we maight queue skb's, but mt76u_tx_kick() * will fail to submit urb, cleanup those skb's manually. */ for (i = 0; i <= MT_TXQ_PSD; i++) { q = dev->phy.q_tx[i]; if (!q) continue; while (q->queued > 0) { entry = q->entry[q->tail]; q->entry[q->tail].done = false; mt76_queue_tx_complete(dev, q, &entry); } } mt76_worker_enable(&dev->tx_worker); } cancel_work_sync(&dev->usb.stat_work); clear_bit(MT76_READING_STATS, &dev->phy.state); mt76_worker_enable(&dev->usb.status_worker); mt76_tx_status_check(dev, true); } EXPORT_SYMBOL_GPL(mt76u_stop_tx); void mt76u_queues_deinit(struct mt76_dev *dev) { mt76u_stop_rx(dev); mt76u_stop_tx(dev); mt76u_free_rx(dev); mt76u_free_tx(dev); } EXPORT_SYMBOL_GPL(mt76u_queues_deinit); int mt76u_alloc_queues(struct mt76_dev *dev) { int err; err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN); if (err < 0) return err; return mt76u_alloc_tx(dev); } EXPORT_SYMBOL_GPL(mt76u_alloc_queues); static const struct mt76_queue_ops usb_queue_ops = { .tx_queue_skb = mt76u_tx_queue_skb, .kick = mt76u_tx_kick, }; int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf, struct mt76_bus_ops *ops) { struct usb_device *udev = interface_to_usbdev(intf); struct mt76_usb *usb = &dev->usb; int err; INIT_WORK(&usb->stat_work, mt76u_tx_status_data); usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0)); if (usb->data_len < 32) usb->data_len = 32; usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL); if (!usb->data) return -ENOMEM; mutex_init(&usb->usb_ctrl_mtx); dev->bus = ops; dev->queue_ops = &usb_queue_ops; dev_set_drvdata(&udev->dev, dev); usb->sg_en = mt76u_check_sg(dev); err = mt76u_set_endpoints(intf, usb); if (err < 0) return err; err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker, "usb-rx"); if (err) return err; err = mt76_worker_setup(dev->hw, &usb->status_worker, mt76u_status_worker, "usb-status"); if (err) return err; sched_set_fifo_low(usb->rx_worker.task); sched_set_fifo_low(usb->status_worker.task); return 0; } EXPORT_SYMBOL_GPL(__mt76u_init); int mt76u_init(struct mt76_dev *dev, struct 
usb_interface *intf) { static struct mt76_bus_ops bus_ops = { .rr = mt76u_rr, .wr = mt76u_wr, .rmw = mt76u_rmw, .read_copy = mt76u_read_copy, .write_copy = mt76u_copy, .wr_rp = mt76u_wr_rp, .rd_rp = mt76u_rd_rp, .type = MT76_BUS_USB, }; return __mt76u_init(dev, intf, &bus_ops); } EXPORT_SYMBOL_GPL(mt76u_init); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); MODULE_DESCRIPTION("MediaTek MT76x USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); |
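The register helpers above funnel every access through a USB vendor control transfer: a 32-bit read splits the address across the control request's value (addr >> 16) and offset fields, and mt76u_rmw() merges the previously read register value with the new bits while holding usb_ctrl_mtx. As a sanity check of that merge arithmetic only (no USB and no locking involved; the register is a plain variable and all names here are made up for the sketch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a device register; in the driver this value would come
 * from ___mt76u_rr() and be written back through ___mt76u_wr(). */
static uint32_t fake_reg = 0xaabbccdd;

static uint32_t demo_rmw(uint32_t mask, uint32_t val)
{
	/* Same expression as mt76u_rmw(): keep the bits outside the mask,
	 * overlay the new value, write the result back. */
	val |= fake_reg & ~mask;
	fake_reg = val;
	return val;
}

int main(void)
{
	uint32_t out = demo_rmw(0x0000ff00, 0x00001200);

	assert(out == 0xaabb12dd);	/* only the masked byte changed */
	printf("reg = 0x%08x\n", fake_reg);
	return 0;
}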
// SPDX-License-Identifier: GPL-2.0+ /* * ext4_jbd2.h * * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * * Copyright 1998--1999 Red Hat corp --- All Rights Reserved * * Ext4-specific journaling extensions. */ #ifndef _EXT4_JBD2_H #define _EXT4_JBD2_H #include <linux/fs.h> #include <linux/jbd2.h> #include "ext4.h" #define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal) /* Define the number of blocks we need to account to a transaction to * modify one block of data. * * We may have to touch one inode, one bitmap buffer, up to three * indirection blocks, the group and superblock summaries, and the data * block to complete the transaction. * * For extents-enabled fs we may have to allocate and modify up to * 5 levels of tree, data block (for each of these we need bitmap + group * summaries), root which is stored in the inode, sb */ #define EXT4_SINGLEDATA_TRANS_BLOCKS(sb) \ (ext4_has_feature_extents(sb) ? 20U : 8U) /* Extended attribute operations touch at most two data buffers, * two bitmap buffers, and two group summaries, in addition to the inode * and the superblock, which are already accounted for. */ #define EXT4_XATTR_TRANS_BLOCKS 6U /* Define the minimum size for a transaction which modifies data. This * needs to take into account the fact that we may end up modifying two * quota files too (one for the group, one for the user quota). The * superblock only gets updated once, of course, so don't bother * counting that again for the quota updates. */ #define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \ EXT4_XATTR_TRANS_BLOCKS - 2 + \ EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) /* * Define the number of metadata blocks we need to account to modify data.
* * This include super block, inode block, quota blocks and xattr blocks */ #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \ EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) /* Define an arbitrary limit for the amount of data we will anticipate * writing to any given transaction. For unbounded transactions such as * write(2) and truncate(2) we can write more than this, but we always * start off at the maximum transaction size and grow the transaction * optimistically as we go. */ #define EXT4_MAX_TRANS_DATA 64U /* We break up a large truncate or write transaction once the handle's * buffer credits gets this low, we need either to extend the * transaction or to start a new one. Reserve enough space here for * inode, bitmap, superblock, group and indirection updates for at least * one block, plus two quota updates. Quota allocations are not * needed. */ #define EXT4_RESERVE_TRANS_BLOCKS 12U /* * Number of credits needed if we need to insert an entry into a * directory. For each new index block, we need 4 blocks (old index * block, new index block, bitmap block, bg summary). For normal * htree directories there are 2 levels; if the largedir feature * enabled it's 3 levels. */ #define EXT4_INDEX_EXTRA_TRANS_BLOCKS 12U #ifdef CONFIG_QUOTA /* Amount of blocks needed for quota update - we know that the structure was * allocated so we need to update only data block */ #define EXT4_QUOTA_TRANS_BLOCKS(sb) ((ext4_quota_capable(sb)) ? 1 : 0) /* Amount of blocks needed for quota insert/delete - we do some block writes * but inode, sb and group updates are done only once */ #define EXT4_QUOTA_INIT_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\ (DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\ +3+DQUOT_INIT_REWRITE) : 0) #define EXT4_QUOTA_DEL_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\ (DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\ +3+DQUOT_DEL_REWRITE) : 0) #else #define EXT4_QUOTA_TRANS_BLOCKS(sb) 0 #define EXT4_QUOTA_INIT_BLOCKS(sb) 0 #define EXT4_QUOTA_DEL_BLOCKS(sb) 0 #endif #define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) /* * Ext4 handle operation types -- for logging purposes */ #define EXT4_HT_MISC 0 #define EXT4_HT_INODE 1 #define EXT4_HT_WRITE_PAGE 2 #define EXT4_HT_MAP_BLOCKS 3 #define EXT4_HT_DIR 4 #define EXT4_HT_TRUNCATE 5 #define EXT4_HT_QUOTA 6 #define EXT4_HT_RESIZE 7 #define EXT4_HT_MIGRATE 8 #define EXT4_HT_MOVE_EXTENTS 9 #define EXT4_HT_XATTR 10 #define EXT4_HT_EXT_CONVERT 11 #define EXT4_HT_MAX 12 int ext4_mark_iloc_dirty(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); /* * On success, We end up with an outstanding reference count against * iloc->bh. This _must_ be cleaned up later. */ int ext4_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); #define ext4_mark_inode_dirty(__h, __i) \ __ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__) int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode, const char *func, unsigned int line); int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc *iloc); /* * Wrapper functions with which ext4 calls into JBD. 
*/ int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr); int __ext4_journal_get_create_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_handle_dirty_metadata(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct buffer_head *bh); #define ext4_journal_get_write_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_write_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ __ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \ (bh), (block_nr)) #define ext4_journal_get_create_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_create_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_handle_dirty_metadata(handle, inode, bh) \ __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ (bh)) handle_t *__ext4_journal_start_sb(struct inode *inode, struct super_block *sb, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) /* Note: Do not use this for NULL handles. This is only to determine if * a properly allocated handle is using a journal or not. */ static inline int ext4_handle_valid(handle_t *handle) { if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT) return 0; return 1; } static inline void ext4_handle_sync(handle_t *handle) { if (ext4_handle_valid(handle)) handle->h_sync = 1; } static inline int ext4_handle_is_aborted(handle_t *handle) { if (ext4_handle_valid(handle)) return is_handle_aborted(handle); return 0; } static inline int ext4_free_metadata_revoke_credits(struct super_block *sb, int blocks) { /* Freeing each metadata block can result in freeing one cluster */ return blocks * EXT4_SB(sb)->s_cluster_ratio; } static inline int ext4_trans_default_revoke_credits(struct super_block *sb) { return ext4_free_metadata_revoke_credits(sb, 8); } #define ext4_journal_start_sb(sb, type, nblocks) \ __ext4_journal_start_sb(NULL, (sb), __LINE__, (type), (nblocks), 0,\ ext4_trans_default_revoke_credits(sb)) #define ext4_journal_start(inode, type, nblocks) \ __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \ (revoke_creds)) static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds) { return __ext4_journal_start_sb(inode, inode->i_sb, line, type, blocks, rsv_blocks, revoke_creds); } #define ext4_journal_stop(handle) \ __ext4_journal_stop(__func__, __LINE__, (handle)) #define ext4_journal_start_reserved(handle, type) \ __ext4_journal_start_reserved((handle), __LINE__, (type)) handle_t 
*__ext4_journal_start_reserved(handle_t *handle, unsigned int line, int type); static inline handle_t *ext4_journal_current_handle(void) { return journal_current_handle(); } static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke) { if (ext4_handle_valid(handle)) return jbd2_journal_extend(handle, nblocks, revoke); return 0; } static inline int ext4_journal_restart(handle_t *handle, int nblocks, int revoke) { if (ext4_handle_valid(handle)) return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS); return 0; } int __ext4_journal_ensure_credits(handle_t *handle, int check_cred, int extend_cred, int revoke_cred); /* * Ensure @handle has at least @check_creds credits available. If not, * transaction will be extended or restarted to contain at least @extend_cred * credits. Before restarting transaction @fn is executed to allow for cleanup * before the transaction is restarted. * * The return value is < 0 in case of error, 0 in case the handle has enough * credits or transaction extension succeeded, 1 in case transaction had to be * restarted. */ #define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \ revoke_cred, fn) \ ({ \ __label__ __ensure_end; \ int err = __ext4_journal_ensure_credits((handle), (check_cred), \ (extend_cred), (revoke_cred)); \ \ if (err <= 0) \ goto __ensure_end; \ err = (fn); \ if (err < 0) \ goto __ensure_end; \ err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \ if (err == 0) \ err = 1; \ __ensure_end: \ err; \ }) /* * Ensure given handle has at least requested amount of credits available, * possibly restarting transaction if needed. We also make sure the transaction * has space for at least ext4_trans_default_revoke_credits(sb) revoke records * as freeing one or two blocks is very common pattern and requesting this is * very cheap. 
*/ static inline int ext4_journal_ensure_credits(handle_t *handle, int credits, int revoke_creds) { return ext4_journal_ensure_credits_fn(handle, credits, credits, revoke_creds, 0); } static inline int ext4_journal_blocks_per_folio(struct inode *inode) { if (EXT4_JOURNAL(inode) != NULL) return jbd2_journal_blocks_per_folio(inode); return 0; } static inline int ext4_journal_force_commit(journal_t *journal) { if (journal) return jbd2_journal_force_commit(journal); return 0; } static inline int ext4_jbd2_inode_add_write(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_write(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline int ext4_jbd2_inode_add_wait(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_wait(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline void ext4_update_inode_fsync_trans(handle_t *handle, struct inode *inode, int datasync) { struct ext4_inode_info *ei = EXT4_I(inode); if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; if (datasync) ei->i_datasync_tid = handle->h_transaction->t_tid; } } /* super.c */ int ext4_force_commit(struct super_block *sb); /* * Ext4 inode journal modes */ #define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */ #define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */ #define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */ int ext4_inode_journal_mode(struct inode *inode); static inline int ext4_should_journal_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE; } static inline int ext4_should_order_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE; } static inline int ext4_should_writeback_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE; } static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks) { if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) return 0; if (!ext4_should_journal_data(inode)) return 0; /* * Data blocks in one extent are contiguous, just account for partial * clusters at extent boundaries */ return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1); } /* * This function controls whether or not we should try to go down the * dioread_nolock code paths, which makes it safe to avoid taking * i_rwsem for direct I/O reads. This only works for extent-based * files, and it doesn't work if data journaling is enabled, since the * dioread_nolock code uses b_private to pass information back to the * I/O completion handler, and this conflicts with the jbd's use of * b_private. */ static inline int ext4_should_dioread_nolock(struct inode *inode) { if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) return 0; if (!S_ISREG(inode->i_mode)) return 0; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return 0; if (ext4_should_journal_data(inode)) return 0; /* temporary fix to prevent generic/422 test failures */ if (!test_opt(inode->i_sb, DELALLOC)) return 0; return 1; } /* * Pass journal explicitly as it may not be cached in the sbi->s_journal in some * cases */ static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal) { int err = 0; /* * At this point only two things can be operating on the journal. 
* JBD2 thread performing transaction commit and s_sb_upd_work * issuing sb update through the journal. Once we set * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not * queue s_sb_upd_work and ext4_force_commit() makes sure any * ext4_handle_error() calls from the running transaction commit are * finished. Hence no new s_sb_upd_work can be queued after we * flush it here. */ ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY); ext4_force_commit(sbi->s_sb); flush_work(&sbi->s_sb_upd_work); err = jbd2_journal_destroy(journal); sbi->s_journal = NULL; return err; } #endif /* _EXT4_JBD2_H */ |
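To make the credit macros above concrete, the sketch below re-evaluates EXT4_DATA_TRANS_BLOCKS() for one assumed configuration: extents enabled (so EXT4_SINGLEDATA_TRANS_BLOCKS is 20), quota capable (one block per quota update) and EXT4_MAXQUOTAS taken as 3, its usual value. The arithmetic mirrors the macro; the concrete numbers are illustrative only, since the real values depend on the superblock at runtime.

#include <stdio.h>

int main(void)
{
	/* Assumed configuration, see the note above. */
	unsigned int singledata = 20;		/* EXT4_SINGLEDATA_TRANS_BLOCKS(sb) */
	unsigned int xattr = 6;			/* EXT4_XATTR_TRANS_BLOCKS */
	unsigned int maxquotas_trans = 3 * 1;	/* EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) */

	/* EXT4_DATA_TRANS_BLOCKS(sb) =
	 *	SINGLEDATA + XATTR - 2 + MAXQUOTAS_TRANS */
	unsigned int data_trans = singledata + xattr - 2 + maxquotas_trans;

	printf("EXT4_DATA_TRANS_BLOCKS ~= %u\n", data_trans);	/* 27 here */
	return 0;
}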
// SPDX-License-Identifier: GPL-2.0-or-later /* * Surface2.0/SUR40/PixelSense input driver * * Copyright (c) 2014 by Florian 'floe' Echtler <floe@butterbrot.org> * * Derived from the USB Skeleton driver 1.1, * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com) * * and from the Apple USB BCM5974 multitouch driver, * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se) * * and from the generic hid-multitouch driver, * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr> * * and from the v4l2-pci-skeleton driver, * Copyright (c) Copyright 2014 Cisco Systems, Inc. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/printk.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/usb/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-sg.h> /* read 512 bytes from endpoint 0x86 -> get header + blobs */ struct sur40_header { __le16 type; /* always 0x0001 */ __le16 count; /* count of blobs (if 0: continue prev. packet) */ __le32 packet_id; /* unique ID for all packets in one frame */ __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ } __packed; struct sur40_blob { __le16 blob_id; u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */ __le16 bb_pos_x; /* upper left corner of bounding box */ __le16 bb_pos_y; __le16 bb_size_x; /* size of bounding box */ __le16 bb_size_y; __le16 pos_x; /* finger tip position */ __le16 pos_y; __le16 ctr_x; /* centroid position */ __le16 ctr_y; __le16 axis_x; /* somehow related to major/minor axis, mostly: */ __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */ __le32 angle; /* orientation in radians relative to x axis - actually an IEEE754 float, don't use in kernel */ __le32 area; /* size in pixels/pressure (?)
*/ u8 padding[24]; __le32 tag_id; /* valid when type == 0x04 (SUR40_TAG) */ __le32 unknown; } __packed; /* combined header/blob data */ struct sur40_data { struct sur40_header header; struct sur40_blob blobs[]; } __packed; /* read 512 bytes from endpoint 0x82 -> get header below * continue reading 16k blocks until header.size bytes read */ struct sur40_image_header { __le32 magic; /* "SUBF" */ __le32 packet_id; __le32 size; /* always 0x0007e900 = 960x540 */ __le32 timestamp; /* milliseconds (increases by 16 or 17 each frame) */ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ } __packed; /* version information */ #define DRIVER_SHORT "sur40" #define DRIVER_LONG "Samsung SUR40" #define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>" #define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver" /* vendor and device IDs */ #define ID_MICROSOFT 0x045e #define ID_SUR40 0x0775 /* sensor resolution */ #define SENSOR_RES_X 1920 #define SENSOR_RES_Y 1080 /* touch data endpoint */ #define TOUCH_ENDPOINT 0x86 /* video data endpoint */ #define VIDEO_ENDPOINT 0x82 /* video header fields */ #define VIDEO_HEADER_MAGIC 0x46425553 #define VIDEO_PACKET_SIZE 16384 /* polling interval (ms) */ #define POLL_INTERVAL 1 /* maximum number of contacts FIXME: this is a guess? */ #define MAX_CONTACTS 64 /* control commands */ #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ #define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */ #define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */ #define SUR40_POKE 0xc5 /* poke register byte */ #define SUR40_PEEK 0xc4 /* 48 bytes registers */ #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ #define SUR40_BLOB 0x01 #define SUR40_TOUCH 0x02 #define SUR40_TAG 0x04 /* video controls */ #define SUR40_BRIGHTNESS_MAX 0xff #define SUR40_BRIGHTNESS_MIN 0x00 #define SUR40_BRIGHTNESS_DEF 0xff #define SUR40_CONTRAST_MAX 0x0f #define SUR40_CONTRAST_MIN 0x00 #define SUR40_CONTRAST_DEF 0x0a #define SUR40_GAIN_MAX 0x09 #define SUR40_GAIN_MIN 0x00 #define SUR40_GAIN_DEF 0x08 #define SUR40_BACKLIGHT_MAX 0x01 #define SUR40_BACKLIGHT_MIN 0x00 #define SUR40_BACKLIGHT_DEF 0x01 #define sur40_str(s) #s #define SUR40_PARAM_RANGE(lo, hi) " (range " sur40_str(lo) "-" sur40_str(hi) ")" /* module parameters */ static uint brightness = SUR40_BRIGHTNESS_DEF; module_param(brightness, uint, 0644); MODULE_PARM_DESC(brightness, "set initial brightness" SUR40_PARAM_RANGE(SUR40_BRIGHTNESS_MIN, SUR40_BRIGHTNESS_MAX)); static uint contrast = SUR40_CONTRAST_DEF; module_param(contrast, uint, 0644); MODULE_PARM_DESC(contrast, "set initial contrast" SUR40_PARAM_RANGE(SUR40_CONTRAST_MIN, SUR40_CONTRAST_MAX)); static uint gain = SUR40_GAIN_DEF; module_param(gain, uint, 0644); MODULE_PARM_DESC(gain, "set initial gain" SUR40_PARAM_RANGE(SUR40_GAIN_MIN, SUR40_GAIN_MAX)); static const struct v4l2_pix_format sur40_pix_format[] = { { .pixelformat = V4L2_TCH_FMT_TU08, .width = SENSOR_RES_X / 2, .height = SENSOR_RES_Y / 2, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_RAW, .bytesperline = SENSOR_RES_X / 2, .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2), }, { .pixelformat = V4L2_PIX_FMT_GREY, .width = SENSOR_RES_X / 2, .height = SENSOR_RES_Y / 2, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_RAW, .bytesperline = SENSOR_RES_X / 2, .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2), } }; /* master device state */ struct sur40_state { struct usb_device *usbdev; struct device *dev; struct input_dev *input; struct v4l2_device v4l2; struct video_device vdev; 
struct mutex lock; struct v4l2_pix_format pix_fmt; struct v4l2_ctrl_handler hdl; struct vb2_queue queue; struct list_head buf_list; spinlock_t qlock; int sequence; struct sur40_data *bulk_in_buffer; size_t bulk_in_size; u8 bulk_in_epaddr; u8 vsvideo; char phys[64]; }; struct sur40_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; /* forward declarations */ static const struct video_device sur40_video_device; static const struct vb2_queue sur40_queue; static void sur40_process_video(struct sur40_state *sur40); static int sur40_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops sur40_ctrl_ops = { .s_ctrl = sur40_s_ctrl, }; /* * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT * here by mistake which is very likely to have corrupted the firmware EEPROM * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug. * Should you ever run into a similar problem, the background story to this * incident and instructions on how to fix the corrupted EEPROM are available * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html */ /* command wrapper */ static int sur40_command(struct sur40_state *dev, u8 command, u16 index, void *buffer, u16 size) { return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0), command, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x00, index, buffer, size, 1000); } /* poke a byte in the panel register space */ static int sur40_poke(struct sur40_state *dev, u8 offset, u8 value) { int result; u8 index = 0x96; // 0xae for permanent write result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x32, index, NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x72, offset, NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0xb2, value, NULL, 0, 1000); if (result < 0) goto error; msleep(5); error: return result; } static int sur40_set_preprocessor(struct sur40_state *dev, u8 value) { u8 setting_07[2] = { 0x01, 0x00 }; u8 setting_17[2] = { 0x85, 0x80 }; int result; if (value > 1) return -ERANGE; result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x07, setting_07[value], NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x17, setting_17[value], NULL, 0, 1000); if (result < 0) goto error; msleep(5); error: return result; } static void sur40_set_vsvideo(struct sur40_state *handle, u8 value) { int i; for (i = 0; i < 4; i++) sur40_poke(handle, 0x1c+i, value); handle->vsvideo = value; } static void sur40_set_irlevel(struct sur40_state *handle, u8 value) { int i; for (i = 0; i < 8; i++) sur40_poke(handle, 0x08+(2*i), value); } /* Initialization routine, called from sur40_open */ static int sur40_init(struct sur40_state *dev) { int result; u8 *buffer; buffer = kmalloc(24, GFP_KERNEL); if (!buffer) { result = -ENOMEM; goto error; } /* stupidly replay the original MS driver init sequence */ result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x01, 
buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24); if (result < 0) goto error; result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12); if (result < 0) goto error; result = 0; /* * Discard the result buffer - no known data inside except * some version strings, maybe extract these sometime... */ error: kfree(buffer); return result; } /* * Callback routines from input_dev */ /* Enable the device, polling will now start. */ static int sur40_open(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); dev_dbg(sur40->dev, "open\n"); return sur40_init(sur40); } /* Disable device, polling has stopped. */ static void sur40_close(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); dev_dbg(sur40->dev, "close\n"); /* * There is no known way to stop the device, so we simply * stop polling. */ } /* * This function is called when a whole contact has been processed, * so that it can assign it to a slot and store the data there. */ static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) { int wide, major, minor; int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum; if (blob->type != SUR40_TOUCH) return; slotnum = input_mt_get_slot_by_key(input, le16_to_cpu(blob->blob_id)); if (slotnum < 0 || slotnum >= MAX_CONTACTS) return; bb_size_x = le16_to_cpu(blob->bb_size_x); bb_size_y = le16_to_cpu(blob->bb_size_y); pos_x = le16_to_cpu(blob->pos_x); pos_y = le16_to_cpu(blob->pos_y); ctr_x = le16_to_cpu(blob->ctr_x); ctr_y = le16_to_cpu(blob->ctr_y); input_mt_slot(input, slotnum); input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); wide = (bb_size_x > bb_size_y); major = max(bb_size_x, bb_size_y); minor = min(bb_size_x, bb_size_y); input_report_abs(input, ABS_MT_POSITION_X, pos_x); input_report_abs(input, ABS_MT_POSITION_Y, pos_y); input_report_abs(input, ABS_MT_TOOL_X, ctr_x); input_report_abs(input, ABS_MT_TOOL_Y, ctr_y); /* TODO: use a better orientation measure */ input_report_abs(input, ABS_MT_ORIENTATION, wide); input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); } /* core function: poll for new input data */ static void sur40_poll(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); int result, bulk_read, need_blobs, packet_blobs, i; struct sur40_header *header = &sur40->bulk_in_buffer->header; struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0]; dev_dbg(sur40->dev, "poll\n"); need_blobs = -1; do { /* perform a blocking bulk read to get data from the device */ result = usb_bulk_msg(sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr), sur40->bulk_in_buffer, sur40->bulk_in_size, &bulk_read, 1000); dev_dbg(sur40->dev, "received %d bytes\n", bulk_read); if (result < 0) { dev_err(sur40->dev, "error in usb_bulk_read\n"); return; } result = bulk_read - sizeof(struct sur40_header); if (result % sizeof(struct sur40_blob) != 0) { dev_err(sur40->dev, "transfer size mismatch\n"); return; } /* first packet? */ if (need_blobs == -1) { need_blobs = le16_to_cpu(header->count); dev_dbg(sur40->dev, "need %d blobs\n", need_blobs); /* packet_id = le32_to_cpu(header->packet_id); */ } /* * Sanity check. 
when video data is also being retrieved, the * packet ID will usually increase in the middle of a series * instead of at the end. However, the data is still consistent, * so the packet ID is probably just valid for the first packet * in a series. if (packet_id != le32_to_cpu(header->packet_id)) dev_dbg(sur40->dev, "packet ID mismatch\n"); */ packet_blobs = result / sizeof(struct sur40_blob); dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); /* packets always contain at least 4 blobs, even if empty */ if (packet_blobs > need_blobs) packet_blobs = need_blobs; for (i = 0; i < packet_blobs; i++) { need_blobs--; dev_dbg(sur40->dev, "processing blob\n"); sur40_report_blob(&(inblob[i]), input); } } while (need_blobs > 0); input_mt_sync_frame(input); input_sync(input); sur40_process_video(sur40); } /* deal with video data */ static void sur40_process_video(struct sur40_state *sur40) { struct sur40_image_header *img = (void *)(sur40->bulk_in_buffer); struct sur40_buffer *new_buf; struct usb_sg_request sgr; struct sg_table *sgt; int result, bulk_read; if (!vb2_start_streaming_called(&sur40->queue)) return; /* get a new buffer from the list */ spin_lock(&sur40->qlock); if (list_empty(&sur40->buf_list)) { dev_dbg(sur40->dev, "buffer queue empty\n"); spin_unlock(&sur40->qlock); return; } new_buf = list_entry(sur40->buf_list.next, struct sur40_buffer, list); list_del(&new_buf->list); spin_unlock(&sur40->qlock); dev_dbg(sur40->dev, "buffer acquired\n"); /* retrieve data via bulk read */ result = usb_bulk_msg(sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), sur40->bulk_in_buffer, sur40->bulk_in_size, &bulk_read, 1000); if (result < 0) { dev_err(sur40->dev, "error in usb_bulk_read\n"); goto err_poll; } if (bulk_read != sizeof(struct sur40_image_header)) { dev_err(sur40->dev, "received %d bytes (%zd expected)\n", bulk_read, sizeof(struct sur40_image_header)); goto err_poll; } if (le32_to_cpu(img->magic) != VIDEO_HEADER_MAGIC) { dev_err(sur40->dev, "image magic mismatch\n"); goto err_poll; } if (le32_to_cpu(img->size) != sur40->pix_fmt.sizeimage) { dev_err(sur40->dev, "image size mismatch\n"); goto err_poll; } dev_dbg(sur40->dev, "header acquired\n"); sgt = vb2_dma_sg_plane_desc(&new_buf->vb.vb2_buf, 0); result = usb_sg_init(&sgr, sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0, sgt->sgl, sgt->nents, sur40->pix_fmt.sizeimage, 0); if (result < 0) { dev_err(sur40->dev, "error %d in usb_sg_init\n", result); goto err_poll; } usb_sg_wait(&sgr); if (sgr.status < 0) { dev_err(sur40->dev, "error %d in usb_sg_wait\n", sgr.status); goto err_poll; } dev_dbg(sur40->dev, "image acquired\n"); /* return error if streaming was stopped in the meantime */ if (sur40->sequence == -1) return; /* mark as finished */ new_buf->vb.vb2_buf.timestamp = ktime_get_ns(); new_buf->vb.sequence = sur40->sequence++; new_buf->vb.field = V4L2_FIELD_NONE; vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); dev_dbg(sur40->dev, "buffer marked done\n"); return; err_poll: vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } /* Initialize input device parameters. 
*/ static int sur40_input_setup_events(struct input_dev *input_dev) { int error; input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, SENSOR_RES_Y, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_X, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_Y, 0, SENSOR_RES_Y, 0, 0); /* max value unknown, but major/minor axis * can never be larger than screen */ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, SENSOR_RES_Y, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); error = input_mt_init_slots(input_dev, MAX_CONTACTS, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { dev_err(input_dev->dev.parent, "failed to set up slots\n"); return error; } return 0; } /* Check candidate USB interface. */ static int sur40_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(interface); struct sur40_state *sur40; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct input_dev *input; int error; /* Check if we really have the right interface. */ iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; if (iface_desc->desc.bNumEndpoints < 5) return -ENODEV; /* Use endpoint #4 (0x86). */ endpoint = &iface_desc->endpoint[4].desc; if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) return -ENODEV; /* Allocate memory for our device state and initialize it. */ sur40 = kzalloc(sizeof(*sur40), GFP_KERNEL); if (!sur40) return -ENOMEM; input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_free_dev; } /* initialize locks/lists */ INIT_LIST_HEAD(&sur40->buf_list); spin_lock_init(&sur40->qlock); mutex_init(&sur40->lock); /* Set up regular input device structure */ input->name = DRIVER_LONG; usb_to_input_id(usbdev, &input->id); usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys)); strlcat(sur40->phys, "/input0", sizeof(sur40->phys)); input->phys = sur40->phys; input->dev.parent = &interface->dev; input->open = sur40_open; input->close = sur40_close; error = sur40_input_setup_events(input); if (error) goto err_free_input; input_set_drvdata(input, sur40); error = input_setup_polling(input, sur40_poll); if (error) { dev_err(&interface->dev, "failed to set up polling"); goto err_free_input; } input_set_poll_interval(input, POLL_INTERVAL); sur40->usbdev = usbdev; sur40->dev = &interface->dev; sur40->input = input; /* use the bulk-in endpoint tested above */ sur40->bulk_in_size = usb_endpoint_maxp(endpoint); sur40->bulk_in_epaddr = endpoint->bEndpointAddress; sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL); if (!sur40->bulk_in_buffer) { dev_err(&interface->dev, "Unable to allocate input buffer."); error = -ENOMEM; goto err_free_input; } /* register the polled input device */ error = input_register_device(input); if (error) { dev_err(&interface->dev, "Unable to register polled input device."); goto err_free_buffer; } /* register the video master device */ snprintf(sur40->v4l2.name, sizeof(sur40->v4l2.name), "%s", DRIVER_LONG); error = v4l2_device_register(sur40->dev, &sur40->v4l2); if (error) { dev_err(&interface->dev, "Unable to register video master device."); goto err_unreg_v4l2; } /* initialize the lock and subdevice */ sur40->queue = sur40_queue; sur40->queue.drv_priv = sur40; sur40->queue.lock = &sur40->lock; sur40->queue.dev = sur40->dev; /* 
initialize the queue */ error = vb2_queue_init(&sur40->queue); if (error) goto err_unreg_v4l2; sur40->pix_fmt = sur40_pix_format[0]; sur40->vdev = sur40_video_device; sur40->vdev.v4l2_dev = &sur40->v4l2; sur40->vdev.lock = &sur40->lock; sur40->vdev.queue = &sur40->queue; video_set_drvdata(&sur40->vdev, sur40); /* initialize the control handler for 4 controls */ v4l2_ctrl_handler_init(&sur40->hdl, 4); sur40->v4l2.ctrl_handler = &sur40->hdl; sur40->vsvideo = (SUR40_CONTRAST_DEF << 4) | SUR40_GAIN_DEF; v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_BRIGHTNESS, SUR40_BRIGHTNESS_MIN, SUR40_BRIGHTNESS_MAX, 1, clamp(brightness, (uint)SUR40_BRIGHTNESS_MIN, (uint)SUR40_BRIGHTNESS_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_CONTRAST, SUR40_CONTRAST_MIN, SUR40_CONTRAST_MAX, 1, clamp(contrast, (uint)SUR40_CONTRAST_MIN, (uint)SUR40_CONTRAST_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_GAIN, SUR40_GAIN_MIN, SUR40_GAIN_MAX, 1, clamp(gain, (uint)SUR40_GAIN_MIN, (uint)SUR40_GAIN_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, SUR40_BACKLIGHT_MIN, SUR40_BACKLIGHT_MAX, 1, SUR40_BACKLIGHT_DEF); v4l2_ctrl_handler_setup(&sur40->hdl); if (sur40->hdl.error) { dev_err(&interface->dev, "Unable to register video controls."); v4l2_ctrl_handler_free(&sur40->hdl); error = sur40->hdl.error; goto err_unreg_v4l2; } error = video_register_device(&sur40->vdev, VFL_TYPE_TOUCH, -1); if (error) { dev_err(&interface->dev, "Unable to register video subdevice."); goto err_unreg_video; } /* we can register the device now, as it is ready */ usb_set_intfdata(interface, sur40); dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC); return 0; err_unreg_video: video_unregister_device(&sur40->vdev); err_unreg_v4l2: v4l2_device_unregister(&sur40->v4l2); err_free_buffer: kfree(sur40->bulk_in_buffer); err_free_input: input_free_device(input); err_free_dev: kfree(sur40); return error; } /* Unregister device & clean up. */ static void sur40_disconnect(struct usb_interface *interface) { struct sur40_state *sur40 = usb_get_intfdata(interface); v4l2_ctrl_handler_free(&sur40->hdl); video_unregister_device(&sur40->vdev); v4l2_device_unregister(&sur40->v4l2); input_unregister_device(sur40->input); kfree(sur40->bulk_in_buffer); kfree(sur40); usb_set_intfdata(interface, NULL); dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC); } /* * Setup the constraints of the queue: besides setting the number of planes * per buffer and the size and allocation context of each plane, it also * checks if sufficient buffers have been allocated. Usually 3 is a good * minimum number: many DMA engines need a minimum of 2 buffers in the * queue and you need to have another available for userspace processing. */ static int sur40_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct sur40_state *sur40 = vb2_get_drv_priv(q); unsigned int q_num_bufs = vb2_get_num_buffers(q); if (q_num_bufs + *nbuffers < 3) *nbuffers = 3 - q_num_bufs; if (*nplanes) return sizes[0] < sur40->pix_fmt.sizeimage ? -EINVAL : 0; *nplanes = 1; sizes[0] = sur40->pix_fmt.sizeimage; return 0; } /* * Prepare the buffer for queueing to the DMA engine: check and set the * payload size. 
*/ static int sur40_buffer_prepare(struct vb2_buffer *vb) { struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue); unsigned long size = sur40->pix_fmt.sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(&sur40->usbdev->dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); return 0; } /* * Queue this buffer to the DMA engine. */ static void sur40_buffer_queue(struct vb2_buffer *vb) { struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue); struct sur40_buffer *buf = (struct sur40_buffer *)vb; spin_lock(&sur40->qlock); list_add_tail(&buf->list, &sur40->buf_list); spin_unlock(&sur40->qlock); } static void return_all_buffers(struct sur40_state *sur40, enum vb2_buffer_state state) { struct sur40_buffer *buf, *node; spin_lock(&sur40->qlock); list_for_each_entry_safe(buf, node, &sur40->buf_list, list) { vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); } spin_unlock(&sur40->qlock); } /* * Start streaming. First check if the minimum number of buffers have been * queued. If not, then return -ENOBUFS and the vb2 framework will call * this function again the next time a buffer has been queued until enough * buffers are available to actually start the DMA engine. */ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); sur40->sequence = 0; return 0; } /* * Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued * and passed on to the vb2 framework marked as STATE_ERROR. */ static void sur40_stop_streaming(struct vb2_queue *vq) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); vb2_wait_for_all_buffers(vq); sur40->sequence = -1; /* Release all active buffers */ return_all_buffers(sur40, VB2_BUF_STATE_ERROR); } /* V4L ioctl */ static int sur40_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct sur40_state *sur40 = video_drvdata(file); strscpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver)); strscpy(cap->card, DRIVER_LONG, sizeof(cap->card)); usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int sur40_vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; i->type = V4L2_INPUT_TYPE_TOUCH; i->std = V4L2_STD_UNKNOWN; strscpy(i->name, "In-Cell Sensor", sizeof(i->name)); i->capabilities = 0; return 0; } static int sur40_vidioc_s_input(struct file *file, void *priv, unsigned int i) { return (i == 0) ? 
0 : -EINVAL; } static int sur40_vidioc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int sur40_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_GREY: f->fmt.pix = sur40_pix_format[1]; break; default: f->fmt.pix = sur40_pix_format[0]; break; } return 0; } static int sur40_vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct sur40_state *sur40 = video_drvdata(file); switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_GREY: sur40->pix_fmt = sur40_pix_format[1]; break; default: sur40->pix_fmt = sur40_pix_format[0]; break; } f->fmt.pix = sur40->pix_fmt; return 0; } static int sur40_vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct sur40_state *sur40 = video_drvdata(file); f->fmt.pix = sur40->pix_fmt; return 0; } static int sur40_s_ctrl(struct v4l2_ctrl *ctrl) { struct sur40_state *sur40 = container_of(ctrl->handler, struct sur40_state, hdl); u8 value = sur40->vsvideo; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: sur40_set_irlevel(sur40, ctrl->val); break; case V4L2_CID_CONTRAST: value = (value & 0x0f) | (ctrl->val << 4); sur40_set_vsvideo(sur40, value); break; case V4L2_CID_GAIN: value = (value & 0xf0) | (ctrl->val); sur40_set_vsvideo(sur40, value); break; case V4L2_CID_BACKLIGHT_COMPENSATION: sur40_set_preprocessor(sur40, ctrl->val); break; } return 0; } static int sur40_ioctl_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; p->parm.capture.timeperframe.numerator = 1; p->parm.capture.timeperframe.denominator = 60; p->parm.capture.readbuffers = 3; return 0; } static int sur40_vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= ARRAY_SIZE(sur40_pix_format)) return -EINVAL; f->pixelformat = sur40_pix_format[f->index].pixelformat; f->flags = 0; return 0; } static int sur40_vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *f) { struct sur40_state *sur40 = video_drvdata(file); if ((f->index != 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08) && (f->pixel_format != V4L2_PIX_FMT_GREY))) return -EINVAL; f->type = V4L2_FRMSIZE_TYPE_DISCRETE; f->discrete.width = sur40->pix_fmt.width; f->discrete.height = sur40->pix_fmt.height; return 0; } static int sur40_vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *f) { struct sur40_state *sur40 = video_drvdata(file); if ((f->index > 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08) && (f->pixel_format != V4L2_PIX_FMT_GREY)) || (f->width != sur40->pix_fmt.width) || (f->height != sur40->pix_fmt.height)) return -EINVAL; f->type = V4L2_FRMIVAL_TYPE_DISCRETE; f->discrete.denominator = 60; f->discrete.numerator = 1; return 0; } static const struct usb_device_id sur40_table[] = { { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */ { } /* terminating null entry */ }; MODULE_DEVICE_TABLE(usb, sur40_table); /* V4L2 structures */ static const struct vb2_ops sur40_queue_ops = { .queue_setup = sur40_queue_setup, .buf_prepare = sur40_buffer_prepare, .buf_queue = sur40_buffer_queue, .start_streaming = sur40_start_streaming, .stop_streaming = sur40_stop_streaming, }; static const struct vb2_queue sur40_queue = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, /* * VB2_USERPTR in currently not enabled: passing a user pointer to * dma-sg will result in segment sizes that are not a multiple of * 512 bytes, 
which is required by the host controller. */ .io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF, .buf_struct_size = sizeof(struct sur40_buffer), .ops = &sur40_queue_ops, .mem_ops = &vb2_dma_sg_memops, .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, .min_queued_buffers = 3, }; static const struct v4l2_file_operations sur40_video_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .unlocked_ioctl = video_ioctl2, .read = vb2_fop_read, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; static const struct v4l2_ioctl_ops sur40_video_ioctl_ops = { .vidioc_querycap = sur40_vidioc_querycap, .vidioc_enum_fmt_vid_cap = sur40_vidioc_enum_fmt, .vidioc_try_fmt_vid_cap = sur40_vidioc_try_fmt, .vidioc_s_fmt_vid_cap = sur40_vidioc_s_fmt, .vidioc_g_fmt_vid_cap = sur40_vidioc_g_fmt, .vidioc_enum_framesizes = sur40_vidioc_enum_framesizes, .vidioc_enum_frameintervals = sur40_vidioc_enum_frameintervals, .vidioc_g_parm = sur40_ioctl_parm, .vidioc_s_parm = sur40_ioctl_parm, .vidioc_enum_input = sur40_vidioc_enum_input, .vidioc_g_input = sur40_vidioc_g_input, .vidioc_s_input = sur40_vidioc_s_input, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; static const struct video_device sur40_video_device = { .name = DRIVER_LONG, .fops = &sur40_video_fops, .ioctl_ops = &sur40_video_ioctl_ops, .release = video_device_release_empty, .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING, }; /* USB-specific object needed to register this driver with the USB subsystem. */ static struct usb_driver sur40_driver = { .name = DRIVER_SHORT, .probe = sur40_probe, .disconnect = sur40_disconnect, .id_table = sur40_table, }; module_usb_driver(sur40_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
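/*
 * A minimal user-space capture sketch (illustration only, not part of the
 * driver): it opens the V4L2 touch node -- the "/dev/v4l-touch0" path is an
 * assumption -- selects the 8-bit greyscale format exposed above and reads a
 * single raw 960x540 frame through the read() path enabled by VB2_READ.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	unsigned char *frame;
	ssize_t n;
	int fd = open("/dev/v4l-touch0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return 1;

	frame = malloc(fmt.fmt.pix.sizeimage);	/* 960 * 540 = 518400 bytes */
	if (!frame)
		return 1;

	n = read(fd, frame, fmt.fmt.pix.sizeimage);
	if (n > 0)
		printf("captured %zd bytes (%ux%u)\n",
		       n, fmt.fmt.pix.width, fmt.fmt.pix.height);

	free(frame);
	close(fd);
	return 0;
}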
// SPDX-License-Identifier: GPL-2.0 /* * Released under the GPLv2 only. */ #include <linux/module.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/log2.h> #include <linux/kmsan.h> #include <linux/usb.h> #include <linux/wait.h> #include <linux/usb/hcd.h> #include <linux/scatterlist.h> #define to_urb(d) container_of(d, struct urb, kref) static void urb_destroy(struct kref *kref) { struct urb *urb = to_urb(kref); if (urb->transfer_flags & URB_FREE_BUFFER) kfree(urb->transfer_buffer); kfree(urb); } /** * usb_init_urb - initializes a urb so that it can be used by a USB driver * @urb: pointer to the urb to initialize * * Initializes a urb so that the USB subsystem can use it properly. * * If a urb is created with a call to usb_alloc_urb() it is not * necessary to call this function. Only use this if you allocate the * space for a struct urb on your own. If you call this function, be * careful when freeing the memory for your urb that it is no longer in * use by the USB core. * * Only use this function if you _really_ understand what you are doing. */ void usb_init_urb(struct urb *urb) { if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); INIT_LIST_HEAD(&urb->urb_list); INIT_LIST_HEAD(&urb->anchor_list); } } EXPORT_SYMBOL_GPL(usb_init_urb); /** * usb_alloc_urb - creates a new urb for a USB driver to use * @iso_packets: number of iso packets for this urb * @mem_flags: the type of memory to allocate, see kmalloc() for a list of * valid options for this. * * Creates an urb for the USB driver to use, initializes a few internal * structures, increments the usage counter, and returns a pointer to it. * * If the driver wants to use this urb for interrupt, control, or bulk * endpoints, pass '0' as the number of iso packets. * * The driver must call usb_free_urb() when it is finished with the urb. * * Return: A pointer to the new urb, or %NULL if no memory is available. */ struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags) { struct urb *urb; urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets), mem_flags); if (!urb) return NULL; usb_init_urb(urb); return urb; } EXPORT_SYMBOL_GPL(usb_alloc_urb); /** * usb_free_urb - frees the memory used by a urb when all users of it are finished * @urb: pointer to the urb to free, may be NULL * * Must be called when a user of a urb is finished with it. When the last user * of the urb calls this function, the memory of the urb is freed. * * Note: The transfer buffer associated with the urb is not freed unless the * URB_FREE_BUFFER transfer flag is set.
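 *
 * A minimal lifecycle sketch of when this call is made (hypothetical driver
 * code, not taken from this file; "udev", "buf", "len", "my_complete" and
 * "ctx" are placeholder names):
 *
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 0x81),
 *			  buf, len, my_complete, ctx);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	...
 *	usb_kill_urb(urb);	(for instance from the disconnect() callback)
 *	usb_free_urb(urb);	(drops the last reference, the memory is freed)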
*/ void usb_free_urb(struct urb *urb) { if (urb) kref_put(&urb->kref, urb_destroy); } EXPORT_SYMBOL_GPL(usb_free_urb); /** * usb_get_urb - increments the reference count of the urb * @urb: pointer to the urb to modify, may be NULL * * This must be called whenever a urb is transferred from a device driver to a * host controller driver. This allows proper reference counting to happen * for urbs. * * Return: A pointer to the urb with the incremented reference counter. */ struct urb *usb_get_urb(struct urb *urb) { if (urb) kref_get(&urb->kref); return urb; } EXPORT_SYMBOL_GPL(usb_get_urb); /** * usb_anchor_urb - anchors an URB while it is processed * @urb: pointer to the urb to anchor * @anchor: pointer to the anchor * * This can be called to have access to URBs which are to be executed * without bothering to track them */ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor) { unsigned long flags; spin_lock_irqsave(&anchor->lock, flags); usb_get_urb(urb); list_add_tail(&urb->anchor_list, &anchor->urb_list); urb->anchor = anchor; if (unlikely(anchor->poisoned)) atomic_inc(&urb->reject); spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_anchor_urb); static int usb_anchor_check_wakeup(struct usb_anchor *anchor) { return atomic_read(&anchor->suspend_wakeups) == 0 && list_empty(&anchor->urb_list); } /* Callers must hold anchor->lock */ static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor) { urb->anchor = NULL; list_del(&urb->anchor_list); usb_put_urb(urb); if (usb_anchor_check_wakeup(anchor)) wake_up(&anchor->wait); } /** * usb_unanchor_urb - unanchors an URB * @urb: pointer to the urb to anchor * * Call this to stop the system keeping track of this URB */ void usb_unanchor_urb(struct urb *urb) { unsigned long flags; struct usb_anchor *anchor; if (!urb) return; anchor = urb->anchor; if (!anchor) return; spin_lock_irqsave(&anchor->lock, flags); /* * At this point, we could be competing with another thread which * has the same intention. To protect the urb from being unanchored * twice, only the winner of the race gets the job. */ if (likely(anchor == urb->anchor)) __usb_unanchor_urb(urb, anchor); spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ static const int pipetypes[4] = { PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT }; /** * usb_pipe_type_check - sanity check of a specific pipe for a usb device * @dev: struct usb_device to be checked * @pipe: pipe to check * * This performs a light-weight sanity check for the endpoint in the * given usb device. It returns 0 if the pipe is valid for the specific usb * device, otherwise a negative error code. */ int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe) { const struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(dev, pipe); if (!ep) return -EINVAL; if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(usb_pipe_type_check); /** * usb_urb_ep_type_check - sanity check of endpoint in the given urb * @urb: urb to be checked * * This performs a light-weight sanity check for the endpoint in the * given urb. It returns 0 if the urb contains a valid endpoint, otherwise * a negative error code. 
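 *
 * A short usage sketch of the pipe check above (hypothetical probe code;
 * "udev" and "epd" stand for the USB device and an endpoint descriptor
 * located earlier):
 *
 *	unsigned int pipe = usb_rcvintpipe(udev, epd->bEndpointAddress);
 *
 *	if (usb_pipe_type_check(udev, pipe))
 *		return -EINVAL;		(not an interrupt-IN endpoint)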
*/ int usb_urb_ep_type_check(const struct urb *urb) { return usb_pipe_type_check(urb->dev, urb->pipe); } EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request * @mem_flags: the type of memory to allocate, see kmalloc() for a list * of valid options for this. * * This submits a transfer request, and transfers control of the URB * describing that request to the USB subsystem. Request completion will * be indicated later, asynchronously, by calling the completion handler. * The three types of completion are success, error, and unlink * (a software-induced fault, also called "request cancellation"). * * URBs may be submitted in interrupt context. * * The caller must have correctly initialized the URB before submitting * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are * available to ensure that most fields are correctly initialized, for * the particular kind of transfer, although they will not initialize * any transfer flags. * * If the submission is successful, the complete() callback from the URB * will be called exactly once, when the USB core and Host Controller Driver * (HCD) are finished with the URB. When the completion function is called, * control of the URB is returned to the device driver which issued the * request. The completion handler may then immediately free or reuse that * URB. * * With few exceptions, USB device drivers should never access URB fields * provided by usbcore or the HCD until its complete() is called. * The exceptions relate to periodic transfer scheduling. For both * interrupt and isochronous urbs, as part of successful URB submission * urb->interval is modified to reflect the actual transfer period used * (normally some power of two units). And for isochronous urbs, * urb->start_frame is modified to reflect when the URB's transfers were * scheduled to start. * * Not all isochronous transfer scheduling policies will work, but most * host controller drivers should easily handle ISO queues going from now * until 10-200 msec into the future. Drivers should try to keep at * least one or two msec of data in the queue; many controllers require * that new transfers start at least 1 msec in the future when they are * added. If the driver is unable to keep up and the queue empties out, * the behavior for new submissions is governed by the URB_ISO_ASAP flag. * If the flag is set, or if the queue is idle, then the URB is always * assigned to the first available (and not yet expired) slot in the * endpoint's schedule. If the flag is not set and the queue is active * then the URB is always assigned to the next slot in the schedule * following the end of the endpoint's previous URB, even if that slot is * in the past. When a packet is assigned in this way to a slot that has * already expired, the packet is not transmitted and the corresponding * usb_iso_packet_descriptor's status field will return -EXDEV. If this * would happen to all the packets in the URB, submission fails with a * -EXDEV error code. * * For control endpoints, the synchronous usb_control_msg() call is * often used (in non-interrupt context) instead of this call. * That is often used through convenience wrappers, for the requests * that are standardized in the USB 2.0 specification. For bulk * endpoints, a synchronous usb_bulk_msg() call is available. * * Return: * 0 on successful submissions. A negative error number otherwise. 
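 *
 * As an illustration of the completion-handler rules below, a resubmitting
 * completion handler is a common pattern (hypothetical driver code, not part
 * of this file; "my_complete" and "process_data" are placeholder names):
 *
 *	static void my_complete(struct urb *urb)
 *	{
 *		if (urb->status)
 *			return;		(device gone, unlinked, or I/O error)
 *
 *		process_data(urb->transfer_buffer, urb->actual_length);
 *		usb_submit_urb(urb, GFP_ATOMIC);   (atomic context, see below)
 *	}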
* * Request Queuing: * * URBs may be submitted to endpoints before previous ones complete, to * minimize the impact of interrupt latencies and system overhead on data * throughput. With that queuing policy, an endpoint's queue would never * be empty. This is required for continuous isochronous data streams, * and may also be required for some kinds of interrupt transfers. Such * queuing also maximizes bandwidth utilization by letting USB controllers * start work on later requests before driver software has finished the * completion processing for earlier (successful) requests. * * As of Linux 2.6, all USB endpoint transfer queues support depths greater * than one. This was previously a HCD-specific behavior, except for ISO * transfers. Non-isochronous endpoint queues are inactive during cleanup * after faults (transfer errors or cancellation). * * Reserved Bandwidth Transfers: * * Periodic transfers (interrupt or isochronous) are performed repeatedly, * using the interval specified in the urb. Submitting the first urb to * the endpoint reserves the bandwidth necessary to make those transfers. * If the USB subsystem can't allocate sufficient bandwidth to perform * the periodic request, submitting such a periodic request should fail. * * For devices under xHCI, the bandwidth is reserved at configuration time, or * when the alt setting is selected. If there is not enough bus bandwidth, the * configuration/alt setting request will fail. Therefore, submissions to * periodic endpoints on devices under xHCI should never fail due to bandwidth * constraints. * * Device drivers must explicitly request that repetition, by ensuring that * some URB is always on the endpoint's queue (except possibly for short * periods during completion callbacks). When there is no longer an urb * queued, the endpoint's bandwidth reservation is canceled. This means * drivers can use their completion handlers to ensure they keep bandwidth * they need, by reinitializing and resubmitting the just-completed urb * until the driver no longer needs that periodic bandwidth. * * Memory Flags: * * The general rules for how to decide which mem_flags to use * are the same as for kmalloc. There are four * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and * GFP_ATOMIC. * * GFP_NOFS is not ever used, as it has not been implemented yet. * * GFP_ATOMIC is used when * (a) you are inside a completion handler, an interrupt, bottom half, * tasklet or timer, or * (b) you are holding a spinlock or rwlock (does not apply to * semaphores), or * (c) current->state != TASK_RUNNING, this is the case only after * you've changed it. * * GFP_NOIO is used in the block io path and error handling of storage * devices. * * All other situations use GFP_KERNEL.
* * Some more specific rules for mem_flags can be inferred, such as * (1) start_xmit, timeout, and receive methods of network drivers must * use GFP_ATOMIC (they are called with a spinlock held); * (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also * called with a spinlock held); * (3) If you use a kernel thread with a network driver you must use * GFP_NOIO, unless (b) or (c) apply; * (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c) * apply or your are in a storage driver's block io path; * (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and * (6) changing firmware on a running storage or net device uses * GFP_NOIO, unless b) or c) apply * */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; int is_out; unsigned int allowed; bool is_eusb2_isoch_double; if (!urb || !urb->complete) return -EINVAL; if (urb->hcpriv) { WARN_ONCE(1, "URB %p submitted while active\n", urb); return -EBUSY; } dev = urb->dev; if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED)) return -ENODEV; /* For now, get the endpoint from the pipe. Eventually drivers * will be required to set urb->ep directly and we will eliminate * urb->pipe. */ ep = usb_pipe_endpoint(dev, urb->pipe); if (!ep) return -ENOENT; urb->ep = ep; urb->status = -EINPROGRESS; urb->actual_length = 0; /* Lots of sanity checks, so HCDs can rely on clean data * and don't need to duplicate tests */ xfertype = usb_endpoint_type(&ep->desc); if (xfertype == USB_ENDPOINT_XFER_CONTROL) { struct usb_ctrlrequest *setup = (struct usb_ctrlrequest *) urb->setup_packet; if (!setup) return -ENOEXEC; is_out = !(setup->bRequestType & USB_DIR_IN) || !setup->wLength; dev_WARN_ONCE(&dev->dev, (usb_pipeout(urb->pipe) != is_out), "BOGUS control dir, pipe %x doesn't match bRequestType %x\n", urb->pipe, setup->bRequestType); if (le16_to_cpu(setup->wLength) != urb->transfer_buffer_length) { dev_dbg(&dev->dev, "BOGUS control len %d doesn't match transfer length %d\n", le16_to_cpu(setup->wLength), urb->transfer_buffer_length); return -EBADR; } } else { is_out = usb_endpoint_dir_out(&ep->desc); } /* Clear the internal flags and cache the direction for later use */ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE | URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL | URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL | URB_DMA_SG_COMBINED); urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN); kmsan_handle_urb(urb, is_out); if (xfertype != USB_ENDPOINT_XFER_CONTROL && dev->state < USB_STATE_CONFIGURED) return -ENODEV; max = usb_endpoint_maxp(&ep->desc); is_eusb2_isoch_double = usb_endpoint_is_hs_isoc_double(dev, ep); if (!max && !is_eusb2_isoch_double) { dev_dbg(&dev->dev, "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", usb_endpoint_num(&ep->desc), is_out ? "out" : "in", __func__, max); return -EMSGSIZE; } /* periodic transfers limit size per frame/uframe, * but drivers only control those sizes for ISO. * while we're checking, initialize return status. 
*/ if (xfertype == USB_ENDPOINT_XFER_ISOC) { int n, len; /* SuperSpeed isoc endpoints have up to 16 bursts of up to * 3 packets each */ if (dev->speed >= USB_SPEED_SUPER) { int burst = 1 + ep->ss_ep_comp.bMaxBurst; int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); max *= burst; max *= mult; } if (dev->speed == USB_SPEED_SUPER_PLUS && USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) { struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp; isoc_ep_comp = &ep->ssp_isoc_ep_comp; max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval); } /* High speed, 1-3 packets/uframe, max 6 for eUSB2 double bw */ if (dev->speed == USB_SPEED_HIGH) { if (is_eusb2_isoch_double) max = le32_to_cpu(ep->eusb2_isoc_ep_comp.dwBytesPerInterval); else max *= usb_endpoint_maxp_mult(&ep->desc); } if (urb->number_of_packets <= 0) return -EINVAL; for (n = 0; n < urb->number_of_packets; n++) { len = urb->iso_frame_desc[n].length; if (len < 0 || len > max) return -EMSGSIZE; urb->iso_frame_desc[n].status = -EXDEV; urb->iso_frame_desc[n].actual_length = 0; } } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint) { struct scatterlist *sg; int i; for_each_sg(urb->sg, sg, urb->num_sgs - 1, i) if (sg->length % max) return -EINVAL; } /* the I/O buffer must be mapped/unmapped, except when length=0 */ if (urb->transfer_buffer_length > INT_MAX) return -EMSGSIZE; /* * stuff that drivers shouldn't do, but which shouldn't * cause problems in HCDs if they get it wrong. */ /* Check that the pipe's type matches the endpoint's type */ if (usb_pipe_type_check(urb->dev, urb->pipe)) dev_warn_once(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); /* Check against a simple/standard policy */ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER); switch (xfertype) { case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: if (is_out) allowed |= URB_ZERO_PACKET; fallthrough; default: /* all non-iso endpoints */ if (!is_out) allowed |= URB_SHORT_NOT_OK; break; case USB_ENDPOINT_XFER_ISOC: allowed |= URB_ISO_ASAP; break; } allowed &= urb->transfer_flags; /* warn if submitter gave bogus flags */ if (allowed != urb->transfer_flags) dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n", urb->transfer_flags, allowed); /* * Force periodic transfer intervals to be legal values that are * a power of two (so HCDs don't need to). * * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC * supports different values... this uses EHCI/UHCI defaults (and * EHCI can use smaller non-default values). */ switch (xfertype) { case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: /* too small? */ if (urb->interval <= 0) return -EINVAL; /* too big? 
*/ switch (dev->speed) { case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: /* units are 125us */ /* Handle up to 2^(16-1) microframes */ if (urb->interval > (1 << 15)) return -EINVAL; max = 1 << 15; break; case USB_SPEED_HIGH: /* units are microframes */ /* NOTE usb handles 2^15 */ if (urb->interval > (1024 * 8)) urb->interval = 1024 * 8; max = 1024 * 8; break; case USB_SPEED_FULL: /* units are frames/msec */ case USB_SPEED_LOW: if (xfertype == USB_ENDPOINT_XFER_INT) { if (urb->interval > 255) return -EINVAL; /* NOTE ohci only handles up to 32 */ max = 128; } else { if (urb->interval > 1024) urb->interval = 1024; /* NOTE usb and ohci handle up to 2^15 */ max = 1024; } break; default: return -EINVAL; } /* Round down to a power of 2, no more than max */ urb->interval = min(max, 1 << ilog2(urb->interval)); } return usb_hcd_submit_urb(urb, mem_flags); } EXPORT_SYMBOL_GPL(usb_submit_urb); /*-------------------------------------------------------------------*/ /** * usb_unlink_urb - abort/cancel a transfer request for an endpoint * @urb: pointer to urb describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. URBs complete only once * per submission, and may be canceled only once per submission. * Successful cancellation means termination of @urb will be expedited * and the completion handler will be called with a status code * indicating that the request has been canceled (rather than any other * code). * * Drivers should not call this routine or related routines, such as * usb_kill_urb(), after their disconnect method has returned. The * disconnect function should synchronize with a driver's I/O routines * to insure that all URB-related activity has completed before it returns. * * This request is asynchronous, however the HCD might call the ->complete() * callback during unlink. Therefore when drivers call usb_unlink_urb(), they * must not hold any locks that may be taken by the completion function. * Success is indicated by returning -EINPROGRESS, at which time the URB will * probably not yet have been given back to the device driver. When it is * eventually called, the completion function will see @urb->status == * -ECONNRESET. * Failure is indicated by usb_unlink_urb() returning any other value. * Unlinking will fail when @urb is not currently "linked" (i.e., it was * never submitted, or it was unlinked before, or the hardware is already * finished with it), even if the completion handler has not yet run. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. * * Return: -EINPROGRESS on success. See description for other values on * failure. * * Unlinking and Endpoint Queues: * * [The behaviors and guarantees described below do not apply to virtual * root hubs but only to endpoint queues for physical USB devices.] * * Host Controller Drivers (HCDs) place all the URBs for a particular * endpoint in a queue. Normally the queue advances as the controller * hardware processes each request. But when an URB terminates with an * error its queue generally stops (see below), at least until that URB's * completion routine returns. It is guaranteed that a stopped queue * will not restart until all its unlinked URBs have been fully retired, * with their completion routines run, even if that's not until some time * after the original completion handler returns. 
The same behavior and * guarantee apply when an URB terminates because it was unlinked. * * Bulk and interrupt endpoint queues are guaranteed to stop whenever an * URB terminates with any sort of error, including -ECONNRESET, -ENOENT, * and -EREMOTEIO. Control endpoint queues behave the same way except * that they are not guaranteed to stop for -EREMOTEIO errors. Queues * for isochronous endpoints are treated differently, because they must * advance at fixed rates. Such queues do not stop when an URB * encounters an error or is unlinked. An unlinked isochronous URB may * leave a gap in the stream of packets; it is undefined whether such * gaps can be filled in. * * Note that early termination of an URB because a short packet was * received will generate a -EREMOTEIO error if and only if the * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device * drivers can build deep queues for large or complex bulk transfers * and clean them up reliably after any sort of aborted transfer by * unlinking all pending URBs at the first fault. * * When a control URB terminates with an error other than -EREMOTEIO, it * is quite likely that the status stage of the transfer will not take * place. */ int usb_unlink_urb(struct urb *urb) { if (!urb) return -EINVAL; if (!urb->dev) return -ENODEV; if (!urb->ep) return -EIDRM; return usb_hcd_unlink_urb(urb, -ECONNRESET); } EXPORT_SYMBOL_GPL(usb_unlink_urb); /** * usb_kill_urb - cancel a transfer request and wait for it to finish * @urb: pointer to URB describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. It is guaranteed that * upon return all completion handlers will have finished and the URB * will be totally idle and available for reuse. These features make * this an ideal way to stop I/O in a disconnect() callback or close() * function. If the request has not already finished or been unlinked * the completion handler will see urb->status == -ENOENT. * * While the routine is running, attempts to resubmit the URB will fail * with error -EPERM. Thus even if the URB's completion handler always * tries to resubmit, it will not succeed and the URB will become idle. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. * * This routine may not be used in an interrupt context (such as a bottom * half or a completion handler), or when holding a spinlock, or in other * situations where the caller can't schedule(). * * This routine should not be called by a driver after its disconnect * method has returned. */ void usb_kill_urb(struct urb *urb) { might_sleep(); if (!(urb && urb->dev && urb->ep)) return; atomic_inc(&urb->reject); /* * Order the write of urb->reject above before the read * of urb->use_count below. Pairs with the barriers in * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). */ smp_mb__after_atomic(); usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); atomic_dec(&urb->reject); } EXPORT_SYMBOL_GPL(usb_kill_urb); /** * usb_poison_urb - reliably kill a transfer and prevent further use of an URB * @urb: pointer to URB describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. It is guaranteed that * upon return all completion handlers will have finished and the URB * will be totally idle and cannot be reused. 
These features make * this an ideal way to stop I/O in a disconnect() callback. * If the request has not already finished or been unlinked * the completion handler will see urb->status == -ENOENT. * * After and while the routine runs, attempts to resubmit the URB will fail * with error -EPERM. Thus even if the URB's completion handler always * tries to resubmit, it will not succeed and the URB will become idle. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. * * This routine may not be used in an interrupt context (such as a bottom * half or a completion handler), or when holding a spinlock, or in other * situations where the caller can't schedule(). * * This routine should not be called by a driver after its disconnect * method has returned. */ void usb_poison_urb(struct urb *urb) { might_sleep(); if (!urb) return; atomic_inc(&urb->reject); /* * Order the write of urb->reject above before the read * of urb->use_count below. Pairs with the barriers in * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). */ smp_mb__after_atomic(); if (!urb->dev || !urb->ep) return; usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); } EXPORT_SYMBOL_GPL(usb_poison_urb); void usb_unpoison_urb(struct urb *urb) { if (!urb) return; atomic_dec(&urb->reject); } EXPORT_SYMBOL_GPL(usb_unpoison_urb); /** * usb_block_urb - reliably prevent further use of an URB * @urb: pointer to URB to be blocked, may be NULL * * After the routine has run, attempts to resubmit the URB will fail * with error -EPERM. Thus even if the URB's completion handler always * tries to resubmit, it will not succeed and the URB will become idle. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. */ void usb_block_urb(struct urb *urb) { if (!urb) return; atomic_inc(&urb->reject); } EXPORT_SYMBOL_GPL(usb_block_urb); /** * usb_kill_anchored_urbs - kill all URBs associated with an anchor * @anchor: anchor the requests are bound to * * This kills all outstanding URBs starting from the back of the queue, * with guarantee that no completer callbacks will take place from the * anchor after this function returns. * * This routine should not be called by a driver after its disconnect * method has returned. */ void usb_kill_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; int surely_empty; do { spin_lock_irq(&anchor->lock); while (!list_empty(&anchor->urb_list)) { victim = list_entry(anchor->urb_list.prev, struct urb, anchor_list); /* make sure the URB isn't freed before we kill it */ usb_get_urb(victim); spin_unlock_irq(&anchor->lock); /* this will unanchor the URB */ usb_kill_urb(victim); usb_put_urb(victim); spin_lock_irq(&anchor->lock); } surely_empty = usb_anchor_check_wakeup(anchor); spin_unlock_irq(&anchor->lock); cpu_relax(); } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); /** * usb_poison_anchored_urbs - cease all traffic from an anchor * @anchor: anchor the requests are bound to * * this allows all outstanding URBs to be poisoned starting * from the back of the queue. Newly added URBs will also be * poisoned * * This routine should not be called by a driver after its disconnect * method has returned. 
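 *
 * A short sketch of the intended usage (hypothetical driver code; the anchor
 * is assumed to live in the driver's private data "dev"):
 *
 *	init_usb_anchor(&dev->anchor);			(at probe time)
 *	...
 *	usb_anchor_urb(urb, &dev->anchor);		(before each submission)
 *	if (usb_submit_urb(urb, GFP_KERNEL))
 *		usb_unanchor_urb(urb);
 *	...
 *	usb_poison_anchored_urbs(&dev->anchor);		(e.g. from disconnect())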
*/ void usb_poison_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; int surely_empty; do { spin_lock_irq(&anchor->lock); anchor->poisoned = 1; while (!list_empty(&anchor->urb_list)) { victim = list_entry(anchor->urb_list.prev, struct urb, anchor_list); /* make sure the URB isn't freed before we kill it */ usb_get_urb(victim); spin_unlock_irq(&anchor->lock); /* this will unanchor the URB */ usb_poison_urb(victim); usb_put_urb(victim); spin_lock_irq(&anchor->lock); } surely_empty = usb_anchor_check_wakeup(anchor); spin_unlock_irq(&anchor->lock); cpu_relax(); } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); /** * usb_unpoison_anchored_urbs - let an anchor be used successfully again * @anchor: anchor the requests are bound to * * Reverses the effect of usb_poison_anchored_urbs * the anchor can be used normally after it returns */ void usb_unpoison_anchored_urbs(struct usb_anchor *anchor) { unsigned long flags; struct urb *lazarus; spin_lock_irqsave(&anchor->lock, flags); list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) { usb_unpoison_urb(lazarus); } anchor->poisoned = 0; spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs); /** * usb_anchor_suspend_wakeups * @anchor: the anchor you want to suspend wakeups on * * Call this to stop the last urb being unanchored from waking up any * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give- * back path to delay waking up until after the completion handler has run. */ void usb_anchor_suspend_wakeups(struct usb_anchor *anchor) { if (anchor) atomic_inc(&anchor->suspend_wakeups); } EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups); /** * usb_anchor_resume_wakeups * @anchor: the anchor you want to resume wakeups on * * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and * wake up any current waiters if the anchor is empty. */ void usb_anchor_resume_wakeups(struct usb_anchor *anchor) { if (!anchor) return; atomic_dec(&anchor->suspend_wakeups); if (usb_anchor_check_wakeup(anchor)) wake_up(&anchor->wait); } EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups); /** * usb_wait_anchor_empty_timeout - wait for an anchor to be unused * @anchor: the anchor you want to become unused * @timeout: how long you are willing to wait in milliseconds * * Call this is you want to be sure all an anchor's * URBs have finished * * Return: Non-zero if the anchor became unused. Zero on timeout. */ int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor, unsigned int timeout) { return wait_event_timeout(anchor->wait, usb_anchor_check_wakeup(anchor), msecs_to_jiffies(timeout)); } EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout); /** * usb_get_from_anchor - get an anchor's oldest urb * @anchor: the anchor whose urb you want * * This will take the oldest urb from an anchor, * unanchor and return it * * Return: The oldest urb from @anchor, or %NULL if @anchor has no * urbs associated with it. 
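 *
 * A typical drain loop (hypothetical caller, not part of this file; the
 * anchor is assumed to live in the driver's private data "dev"):
 *
 *	struct urb *urb;
 *
 *	while ((urb = usb_get_from_anchor(&dev->anchor))) {
 *		usb_kill_urb(urb);
 *		usb_free_urb(urb);	(drop the reference taken by this call)
 *	}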
*/ struct urb *usb_get_from_anchor(struct usb_anchor *anchor) { struct urb *victim; unsigned long flags; spin_lock_irqsave(&anchor->lock, flags); if (!list_empty(&anchor->urb_list)) { victim = list_entry(anchor->urb_list.next, struct urb, anchor_list); usb_get_urb(victim); __usb_unanchor_urb(victim, anchor); } else { victim = NULL; } spin_unlock_irqrestore(&anchor->lock, flags); return victim; } EXPORT_SYMBOL_GPL(usb_get_from_anchor); /** * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs * @anchor: the anchor whose urbs you want to unanchor * * Use this to get rid of all an anchor's urbs. */ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; unsigned long flags; int surely_empty; do { spin_lock_irqsave(&anchor->lock, flags); while (!list_empty(&anchor->urb_list)) { victim = list_entry(anchor->urb_list.prev, struct urb, anchor_list); __usb_unanchor_urb(victim, anchor); } surely_empty = usb_anchor_check_wakeup(anchor); spin_unlock_irqrestore(&anchor->lock, flags); cpu_relax(); } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs); /** * usb_anchor_empty - is an anchor empty * @anchor: the anchor you want to query * * Return: 1 if the anchor has no urbs associated with it. */ int usb_anchor_empty(struct usb_anchor *anchor) { return list_empty(&anchor->urb_list); } EXPORT_SYMBOL_GPL(usb_anchor_empty);
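/*
 * Editor's example (not part of the file above): a minimal sketch of how a
 * driver is expected to combine the anchor API with the kill/poison routines
 * documented above. The "skel" names are hypothetical; only the usb_* calls
 * are real kernel API.
 */
struct skel_dev {
	struct usb_device *udev;
	struct usb_anchor submitted;	/* init_usb_anchor() at probe time */
};

static int skel_submit(struct skel_dev *dev, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, &dev->submitted);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);	/* submission failed, drop it again */
	return ret;
}

static void skel_stop_io(struct skel_dev *dev)
{
	/*
	 * Cancels every outstanding URB; per the usb_kill_anchored_urbs()
	 * documentation, no completion callback runs after this returns.
	 */
	usb_kill_anchored_urbs(&dev->submitted);
}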
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for some logitech "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2010 Hendrik Iben */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/usb.h> #include <linux/wait.h> #include "usbhid/usbhid.h" #include "hid-ids.h" #include "hid-lg.h" #include "hid-lg4ff.h" #define LG_RDESC 0x001 #define LG_BAD_RELATIVE_KEYS 0x002 #define LG_DUPLICATE_USAGES 0x004 #define LG_EXPANDED_KEYMAP 0x010 #define LG_IGNORE_DOUBLED_WHEEL 0x020 #define LG_WIRELESS 0x040 #define LG_INVERT_HWHEEL 0x080 #define LG_NOGET 0x100 #define LG_FF 0x200 #define LG_FF2 0x400 #define LG_RDESC_REL_ABS 0x800 #define LG_FF3 0x1000 #define LG_FF4 0x2000 /* Sizes of the original report descriptors of the wheels fixed up below */ #define DF_RDESC_ORIG_SIZE 130 #define DFP_RDESC_ORIG_SIZE 97 #define FV_RDESC_ORIG_SIZE 130 #define MOMO_RDESC_ORIG_SIZE 87 #define MOMO2_RDESC_ORIG_SIZE 87 #define FFG_RDESC_ORIG_SIZE 85 #define FG_RDESC_ORIG_SIZE 82 /* Fixed report descriptors for Logitech Driving Force (and Pro) * wheel controllers * * The original descriptors hide the separate throttle and brake axes in * a custom vendor usage page, providing only a combined value as * GenericDesktop.Y. * These descriptors remove the combined Y axis and instead report * separate throttle (Y) and brake (RZ).
*/ static const __u8 df_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x14, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x34, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage (Buttons), */ 0x19, 0x01, /* Usage Minimum (1), */ 0x29, 0x0c, /* Usage Maximum (12), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */ 0x09, 0x01, /* Usage (?: 1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x02, /* Input (Variable), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x75, 0x04, /* Report Size (4), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, Null State), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x65, 0x00, /* Unit (none), */ 0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */ 0x09, 0x01, /* Usage (?: 1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (Rz), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x07, /* Report Count (7), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x03, /* Usage (?: 3), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 dfp_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0E, /* Report Size (14), */ 0x14, /* Logical Minimum (0), */ 0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */ 0x34, /* Physical Minimum (0), */ 0x46, 0xFF, 0x3F, /* Physical Maximum (16383), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0E, /* Report Count (14), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0E, /* Usage Maximum (0Eh), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x04, /* Report Size (4), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, 
Nullstate), */ 0x65, 0x00, /* Unit, */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (Rz), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 fv_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0C, /* Usage Maximum (0Ch), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x02, /* Usage (02h), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x75, 0x04, /* Report Size (4), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, Null State), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x65, 0x00, /* Unit, */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x07, /* Report Count (7), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x03, /* Usage (03h), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 momo_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* 
Input (Variable), */ 0x95, 0x08, /* Report Count (8), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x08, /* Usage Maximum (08h), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x75, 0x0E, /* Report Size (14), */ 0x95, 0x01, /* Report Count (1), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x00, /* Usage (00h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 momo2_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0A, /* Report Count (10), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0A, /* Usage Maximum (0Ah), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x00, /* Usage (00h), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x00, /* Usage (00h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 ffg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), 
*/ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 fg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0xA4, /* Push, */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x95, 0x06, /* Report Count (6), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0xB4, /* Pop, */ 0x81, 0x02, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x04, /* Report Count (4), */ 0x09, 0x02, /* Usage (02h), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection, */ }; /* * Certain Logitech keyboards send in report #3 keys which are far * above the logical maximum described in descriptor. 
This extends * the original value of 0x28c of logical maximum to 0x104d */ static const __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, "fixing up rel/abs in Logitech report descriptor\n"); rdesc[33] = rdesc[50] = 0x02; } switch (hdev->product) { case USB_DEVICE_ID_LOGITECH_WINGMAN_FG: if (*rsize == FG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula GP report descriptor\n"); *rsize = sizeof(fg_rdesc_fixed); return fg_rdesc_fixed; } else { hid_info(hdev, "rdesc size test failed for formula gp\n"); } break; case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG: if (*rsize == FFG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula Force GP report descriptor\n"); *rsize = sizeof(ffg_rdesc_fixed); return ffg_rdesc_fixed; } break; /* Several wheels report as this id when operating in emulation mode. */ case USB_DEVICE_ID_LOGITECH_WHEEL: if (*rsize == DF_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force report descriptor\n"); *rsize = sizeof(df_rdesc_fixed); return df_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: if (*rsize == MOMO_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Force (Red) report descriptor\n"); *rsize = sizeof(momo_rdesc_fixed); return momo_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: if (*rsize == MOMO2_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); *rsize = sizeof(momo2_rdesc_fixed); return momo2_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: if (*rsize == FV_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Formula Vibration report descriptor\n"); *rsize = sizeof(fv_rdesc_fixed); return fv_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force Pro report descriptor\n"); *rsize = sizeof(dfp_rdesc_fixed); return dfp_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_WII_WHEEL: if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B && rdesc[47] == 0x05 && rdesc[48] == 0x09) { hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n"); rdesc[41] = 0x05; rdesc[42] = 0x09; rdesc[47] = 0x95; rdesc[48] = 0x0B; } break; } return rdesc; } #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int lg_ultrax_remote_mapping(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { /* Reported on Logitech Ultra X Media Remote */ case 0x004: lg_map_key_clear(KEY_AGAIN); break; case 0x00d: lg_map_key_clear(KEY_HOME); break; case 0x024: lg_map_key_clear(KEY_SHUFFLE); break; case 0x025: lg_map_key_clear(KEY_TV); break; case 0x026: lg_map_key_clear(KEY_MENU); break; case 0x031: lg_map_key_clear(KEY_AUDIO); break; case 0x032: lg_map_key_clear(KEY_TEXT); break; case 0x033: lg_map_key_clear(KEY_LAST); break; case 0x047: 
lg_map_key_clear(KEY_MP3); break; case 0x048: lg_map_key_clear(KEY_DVD); break; case 0x049: lg_map_key_clear(KEY_MEDIA); break; case 0x04a: lg_map_key_clear(KEY_VIDEO); break; case 0x04b: lg_map_key_clear(KEY_ANGLE); break; case 0x04c: lg_map_key_clear(KEY_LANGUAGE); break; case 0x04d: lg_map_key_clear(KEY_SUBTITLE); break; case 0x051: lg_map_key_clear(KEY_RED); break; case 0x052: lg_map_key_clear(KEY_CLOSE); break; default: return 0; } return 1; } static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x1001: lg_map_key_clear(KEY_MESSENGER); break; case 0x1003: lg_map_key_clear(KEY_SOUND); break; case 0x1004: lg_map_key_clear(KEY_VIDEO); break; case 0x1005: lg_map_key_clear(KEY_AUDIO); break; case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break; /* The following two entries are Playlist 1 and 2 on the MX3200 */ case 0x100f: lg_map_key_clear(KEY_FN_1); break; case 0x1010: lg_map_key_clear(KEY_FN_2); break; case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break; case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break; case 0x1013: lg_map_key_clear(KEY_CAMERA); break; case 0x1014: lg_map_key_clear(KEY_MESSENGER); break; case 0x1015: lg_map_key_clear(KEY_RECORD); break; case 0x1016: lg_map_key_clear(KEY_PLAYER); break; case 0x1017: lg_map_key_clear(KEY_EJECTCD); break; case 0x1018: lg_map_key_clear(KEY_MEDIA); break; case 0x1019: lg_map_key_clear(KEY_PROG1); break; case 0x101a: lg_map_key_clear(KEY_PROG2); break; case 0x101b: lg_map_key_clear(KEY_PROG3); break; case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break; case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break; case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break; case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break; case 0x1023: lg_map_key_clear(KEY_CLOSE); break; case 0x1027: lg_map_key_clear(KEY_MENU); break; /* this one is marked as 'Rotate' */ case 0x1028: lg_map_key_clear(KEY_ANGLE); break; case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break; case 0x102a: lg_map_key_clear(KEY_BACK); break; case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break; case 0x102d: lg_map_key_clear(KEY_WWW); break; /* The following two are 'Start/answer call' and 'End/reject call' on the MX3200 */ case 0x1031: lg_map_key_clear(KEY_OK); break; case 0x1032: lg_map_key_clear(KEY_CANCEL); break; case 0x1041: lg_map_key_clear(KEY_BATTERY); break; case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break; case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break; case 0x1044: lg_map_key_clear(KEY_PRESENTATION); break; case 0x1045: lg_map_key_clear(KEY_UNDO); break; case 0x1046: lg_map_key_clear(KEY_REDO); break; case 0x1047: lg_map_key_clear(KEY_PRINT); break; case 0x1048: lg_map_key_clear(KEY_SAVE); break; case 0x1049: lg_map_key_clear(KEY_PROG1); break; case 0x104a: lg_map_key_clear(KEY_PROG2); break; case 0x104b: lg_map_key_clear(KEY_PROG3); break; case 0x104c: lg_map_key_clear(KEY_PROG4); break; default: return 0; } return 1; } static int lg_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* extended mapping for certain Logitech hardware (Logitech cordless desktop LX500) */ static const u8 e_keymap[] = { 0,216, 0,213,175,156, 0, 0, 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, 0,212, 174,167,152,161,112, 0, 0, 0,154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0,183,184,185,186,187, 188,189,190,191,192,193,194, 0, 0, 0 }; struct lg_drv_data *drv_data = hid_get_drvdata(hdev); unsigned int hid = usage->hid; if (hdev->product == USB_DEVICE_ID_LOGITECH_RECEIVER && lg_ultrax_remote_mapping(hi, usage, bit, max)) return 1; if ((drv_data->quirks & LG_WIRELESS) && lg_wireless_mapping(hi, usage, bit, max)) return 1; if ((hid & HID_USAGE_PAGE) != HID_UP_BUTTON) return 0; hid &= HID_USAGE; /* Special handling for Logitech Cordless Desktop */ if (field->application == HID_GD_MOUSE) { if ((drv_data->quirks & LG_IGNORE_DOUBLED_WHEEL) && (hid == 7 || hid == 8)) return -1; } else { if ((drv_data->quirks & LG_EXPANDED_KEYMAP) && hid < ARRAY_SIZE(e_keymap) && e_keymap[hid] != 0) { hid_map_usage(hi, usage, bit, max, EV_KEY, e_keymap[hid]); return 1; } } return 0; } static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_BAD_RELATIVE_KEYS) && usage->type == EV_KEY && (field->flags & HID_MAIN_ITEM_RELATIVE)) field->flags &= ~HID_MAIN_ITEM_RELATIVE; if ((drv_data->quirks & LG_DUPLICATE_USAGES) && (usage->type == EV_KEY || usage->type == EV_REL || usage->type == EV_ABS)) clear_bit(usage->code, *bit); /* Ensure that Logitech wheels are not given a default fuzz/flat value */ if (usage->type == EV_ABS && (usage->code == ABS_X || usage->code == ABS_Y || usage->code == ABS_Z || usage->code == ABS_RZ)) { switch (hdev->product) { case USB_DEVICE_ID_LOGITECH_G29_WHEEL: case USB_DEVICE_ID_LOGITECH_WINGMAN_FG: case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG: case USB_DEVICE_ID_LOGITECH_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: case USB_DEVICE_ID_LOGITECH_G25_WHEEL: case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL: case USB_DEVICE_ID_LOGITECH_G27_WHEEL: case USB_DEVICE_ID_LOGITECH_WII_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: field->application = HID_GD_MULTIAXIS; break; default: break; } } return 0; } static int lg_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_INVERT_HWHEEL) && usage->code == REL_HWHEEL) { input_event(field->hidinput->input, usage->type, usage->code, -value); return 1; } if (drv_data->quirks & LG_FF4) { return lg4ff_adjust_input_event(hdev, field, usage, value, drv_data); } return 0; } static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *rd, int size) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) return lg4ff_raw_event(hdev, report, rd, size, drv_data); return 0; } static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct usb_interface *iface; __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; if (!hid_is_usb(hdev)) return -EINVAL; iface = to_usb_interface(hdev->dev.parent); iface_num = iface->cur_altsetting->desc.bInterfaceNumber; /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); return -ENODEV; } drv_data = kzalloc(sizeof(struct lg_drv_data), GFP_KERNEL); if (!drv_data) { hid_err(hdev, "Insufficient memory, cannot allocate driver data\n"); return -ENOMEM; } drv_data->quirks = id->driver_data; 
hid_set_drvdata(hdev, (void *)drv_data); if (drv_data->quirks & LG_NOGET) hdev->quirks |= HID_QUIRK_NOGET; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } if (drv_data->quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4)) connect_mask &= ~HID_CONNECT_FF; ret = hid_hw_start(hdev, connect_mask); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } /* Setup wireless link with Logitech Wii wheel */ if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { static const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_stop; } ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret >= 0) { /* insert a little delay of 10 jiffies ~ 40ms */ wait_queue_head_t wait; init_waitqueue_head (&wait); wait_event_interruptible_timeout(wait, 0, msecs_to_jiffies(40)); /* Select random Address */ buf[1] = 0xB2; get_random_bytes(&buf[2], 2); ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); } kfree(buf); } if (drv_data->quirks & LG_FF) ret = lgff_init(hdev); else if (drv_data->quirks & LG_FF2) ret = lg2ff_init(hdev); else if (drv_data->quirks & LG_FF3) ret = lg3ff_init(hdev); else if (drv_data->quirks & LG_FF4) ret = lg4ff_init(hdev); if (ret) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: kfree(drv_data); return ret; } static void lg_remove(struct hid_device *hdev) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); hid_hw_stop(hdev); kfree(drv_data); } static const struct hid_device_id lg_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER), .driver_data = LG_RDESC | LG_WIRELESS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER), .driver_data = LG_BAD_RELATIVE_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP), .driver_data = LG_DUPLICATE_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD), .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500), .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD), .driver_data = LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL), .driver_data = LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FG), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), .driver_data = LG_NOGET | LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), .driver_data = LG_FF3 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), .driver_data = LG_RDESC_REL_ABS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER), .driver_data = LG_RDESC_REL_ABS }, { } }; MODULE_DEVICE_TABLE(hid, lg_devices); static struct hid_driver lg_driver = { .name = "logitech", .id_table = lg_devices, .report_fixup = lg_report_fixup, .input_mapping = lg_input_mapping, .input_mapped = lg_input_mapped, .event = lg_event, .raw_event = lg_raw_event, .probe = lg_probe, .remove = lg_remove, }; module_hid_driver(lg_driver); #ifdef CONFIG_LOGIWHEELS_FF int lg4ff_no_autoswitch = 0; module_param_named(lg4ff_no_autoswitch, lg4ff_no_autoswitch, int, S_IRUGO); MODULE_PARM_DESC(lg4ff_no_autoswitch, "Do not switch multimode wheels to their native mode automatically"); #endif MODULE_DESCRIPTION("HID driver for some logitech \"special\" devices"); MODULE_LICENSE("GPL");
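/*
 * Editor's example (not part of the driver above): lg_report_fixup() in
 * miniature. A report_fixup callback either patches descriptor bytes in
 * place after verifying their current values, or swaps in a complete
 * replacement descriptor guarded by an exact original-size check, exactly
 * as the wheel cases above do. All "example_" names and the size 42 are
 * hypothetical.
 */
static const __u8 example_rdesc_fixed[] = {
	0x05, 0x01,	/* Usage Page (Desktop), */
	0x09, 0x04,	/* Usage (Joystick), */
	0xA1, 0x01,	/* Collection (Application), */
	0xC0		/* End Collection */
};

static const __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					unsigned int *rsize)
{
	/* In-place patch: only touch bytes whose values were verified. */
	if (*rsize >= 2 && rdesc[0] == 0x05 && rdesc[1] == 0x0C) {
		hid_info(hdev, "patching example report descriptor\n");
		rdesc[1] = 0x01;
		return rdesc;
	}
	/* Wholesale replacement, guarded by the original-size check. */
	if (*rsize == 42) {
		*rsize = sizeof(example_rdesc_fixed);
		return example_rdesc_fixed;
	}
	return rdesc;
}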
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _NET_NETDEV_LOCK_H #define _NET_NETDEV_LOCK_H #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> static inline bool netdev_trylock(struct net_device *dev) { return mutex_trylock(&dev->lock); } static inline void netdev_assert_locked(const struct net_device *dev) { lockdep_assert_held(&dev->lock); } static inline void netdev_assert_locked_or_invisible(const struct net_device *dev) { if (dev->reg_state == NETREG_REGISTERED || dev->reg_state == NETREG_UNREGISTERING) netdev_assert_locked(dev); } static inline bool netdev_need_ops_lock(const struct net_device *dev) { bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops; #if IS_ENABLED(CONFIG_NET_SHAPER) ret |= !!dev->netdev_ops->net_shaper_ops; #endif return ret; } static inline void netdev_lock_ops(struct net_device *dev) { if (netdev_need_ops_lock(dev)) netdev_lock(dev); } static inline void netdev_unlock_ops(struct net_device *dev) { if (netdev_need_ops_lock(dev)) netdev_unlock(dev); } static inline void netdev_lock_ops_to_full(struct net_device *dev) { if (netdev_need_ops_lock(dev)) netdev_assert_locked(dev); else netdev_lock(dev); } static inline void netdev_unlock_full_to_ops(struct net_device *dev) { if (netdev_need_ops_lock(dev)) netdev_assert_locked(dev); else netdev_unlock(dev); } static inline void netdev_ops_assert_locked(const struct net_device *dev) { if (netdev_need_ops_lock(dev)) lockdep_assert_held(&dev->lock); else ASSERT_RTNL(); } static inline void netdev_ops_assert_locked_or_invisible(const struct net_device *dev) { if (dev->reg_state == NETREG_REGISTERED || dev->reg_state == NETREG_UNREGISTERING) netdev_ops_assert_locked(dev); } static inline void netdev_lock_ops_compat(struct net_device *dev) { if (netdev_need_ops_lock(dev)) netdev_lock(dev); else rtnl_lock(); } static inline void netdev_unlock_ops_compat(struct net_device *dev) { if (netdev_need_ops_lock(dev)) netdev_unlock(dev); else rtnl_unlock(); } static inline int netdev_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b) { if (a == b) return 0; /* Allow locking multiple devices only under rtnl_lock, * the exact order doesn't matter. * Note that upper devices don't lock their ops, so nesting * mostly happens in batched device removal for now. */ return lockdep_rtnl_is_held() ?
-1 : 1; } #define netdev_lockdep_set_classes(dev) \ { \ static struct lock_class_key qdisc_tx_busylock_key; \ static struct lock_class_key qdisc_xmit_lock_key; \ static struct lock_class_key dev_addr_list_lock_key; \ static struct lock_class_key dev_instance_lock_key; \ unsigned int i; \ \ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ lockdep_set_class(&(dev)->addr_list_lock, \ &dev_addr_list_lock_key); \ lockdep_set_class(&(dev)->lock, \ &dev_instance_lock_key); \ lock_set_cmp_fn(&dev->lock, netdev_lock_cmp_fn, NULL); \ for (i = 0; i < (dev)->num_tx_queues; i++) \ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ &qdisc_xmit_lock_key); \ } #define netdev_lock_dereference(p, dev) \ rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock)) int netdev_debug_event(struct notifier_block *nb, unsigned long event, void *ptr); #endif
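/*
 * Editor's example (not part of the header above): how core code is meant
 * to bracket an ops call with the helpers defined here. The wrapper name is
 * hypothetical; ndo_change_mtu is a real netdev op.
 */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	int err = 0;

	netdev_lock_ops(dev);		/* takes dev->lock only if the device opted in */
	netdev_ops_assert_locked(dev);	/* otherwise the caller must hold RTNL */
	if (dev->netdev_ops->ndo_change_mtu)
		err = dev->netdev_ops->ndo_change_mtu(dev, new_mtu);
	netdev_unlock_ops(dev);
	return err;
}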
/* SPDX-License-Identifier: GPL-2.0 */ /* thread_info.h: common low-level thread information accessors * * Copyright (C) 2002 David Howells (dhowells@redhat.com) * - Incorporating suggestions made by Linus Torvalds */ #ifndef _LINUX_THREAD_INFO_H #define _LINUX_THREAD_INFO_H #include <linux/types.h> #include <linux/limits.h> #include <linux/bug.h> #include <linux/restart_block.h> #include <linux/errno.h> #ifdef CONFIG_THREAD_INFO_IN_TASK /* * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, * including <asm/current.h> can cause a circular dependency on some platforms. */ #include <asm/current.h> #define current_thread_info() ((struct thread_info *)current) #endif #include <linux/bitops.h> /* * For per-arch arch_within_stack_frames() implementations, defined in * asm/thread_info.h. */ enum { BAD_STACK = -1, NOT_STACK = 0, GOOD_FRAME, GOOD_STACK, }; #ifdef CONFIG_GENERIC_ENTRY enum syscall_work_bit { SYSCALL_WORK_BIT_SECCOMP, SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT, SYSCALL_WORK_BIT_SYSCALL_TRACE, SYSCALL_WORK_BIT_SYSCALL_EMU, SYSCALL_WORK_BIT_SYSCALL_AUDIT, SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH, SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP, }; #define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP) #define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT) #define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE) #define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU) #define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT) #define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH) #define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP) #endif #include <asm/thread_info.h> #ifndef TIF_NEED_RESCHED_LAZY #ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY #error Inconsistent PREEMPT_LAZY #endif #define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED #define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED #endif #ifndef TIF_RSEQ # define TIF_RSEQ TIF_NOTIFY_RESUME # define _TIF_RSEQ _TIF_NOTIFY_RESUME #endif #ifdef __KERNEL__ #ifndef arch_set_restart_data #define arch_set_restart_data(restart) do { } while (0) #endif static inline long set_restart_fn(struct restart_block *restart, long (*fn)(struct restart_block *)) { restart->fn = fn; arch_set_restart_data(restart); return -ERESTART_RESTARTBLOCK; } #ifndef THREAD_ALIGN #define THREAD_ALIGN THREAD_SIZE #endif #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) /* * flag set/clear/test wrappers * - pass TIF_xxxx constants to these functions */ static inline void set_ti_thread_flag(struct thread_info *ti, int flag) { set_bit(flag,
(unsigned long *)&ti->flags); } static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) { clear_bit(flag, (unsigned long *)&ti->flags); } static inline void update_ti_thread_flag(struct thread_info *ti, int flag, bool value) { if (value) set_ti_thread_flag(ti, flag); else clear_ti_thread_flag(ti, flag); } static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) { return test_and_set_bit(flag, (unsigned long *)&ti->flags); } static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) { return test_and_clear_bit(flag, (unsigned long *)&ti->flags); } static inline int test_ti_thread_flag(struct thread_info *ti, int flag) { return test_bit(flag, (unsigned long *)&ti->flags); } /* * This may be used in noinstr code, and needs to be __always_inline to prevent * inadvertent instrumentation. */ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti) { return READ_ONCE(ti->flags); } #define set_thread_flag(flag) \ set_ti_thread_flag(current_thread_info(), flag) #define clear_thread_flag(flag) \ clear_ti_thread_flag(current_thread_info(), flag) #define update_thread_flag(flag, value) \ update_ti_thread_flag(current_thread_info(), flag, value) #define test_and_set_thread_flag(flag) \ test_and_set_ti_thread_flag(current_thread_info(), flag) #define test_and_clear_thread_flag(flag) \ test_and_clear_ti_thread_flag(current_thread_info(), flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) #define read_thread_flags() \ read_ti_thread_flags(current_thread_info()) #define read_task_thread_flags(t) \ read_ti_thread_flags(task_thread_info(t)) #ifdef CONFIG_GENERIC_ENTRY #define set_syscall_work(fl) \ set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work) #define test_syscall_work(fl) \ test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work) #define clear_syscall_work(fl) \ clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work) #define set_task_syscall_work(t, fl) \ set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) #define test_task_syscall_work(t, fl) \ test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) #define clear_task_syscall_work(t, fl) \ clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) #else /* CONFIG_GENERIC_ENTRY */ #define set_syscall_work(fl) \ set_ti_thread_flag(current_thread_info(), TIF_##fl) #define test_syscall_work(fl) \ test_ti_thread_flag(current_thread_info(), TIF_##fl) #define clear_syscall_work(fl) \ clear_ti_thread_flag(current_thread_info(), TIF_##fl) #define set_task_syscall_work(t, fl) \ set_ti_thread_flag(task_thread_info(t), TIF_##fl) #define test_task_syscall_work(t, fl) \ test_ti_thread_flag(task_thread_info(t), TIF_##fl) #define clear_task_syscall_work(t, fl) \ clear_ti_thread_flag(task_thread_info(t), TIF_##fl) #endif /* !CONFIG_GENERIC_ENTRY */ #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H static __always_inline bool tif_test_bit(int bit) { return arch_test_bit(bit, (unsigned long *)(&current_thread_info()->flags)); } #else static __always_inline bool tif_test_bit(int bit) { return test_bit(bit, (unsigned long *)(&current_thread_info()->flags)); } #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ static __always_inline bool tif_need_resched(void) { return tif_test_bit(TIF_NEED_RESCHED); } #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, const void * const stackend, const void *obj, unsigned long len) { return
0; } #endif #ifndef arch_setup_new_exec static inline void arch_setup_new_exec(void) { } #endif void arch_task_cache_init(void); /* for CONFIG_SH */ void arch_release_task_struct(struct task_struct *tsk); int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #endif /* __KERNEL__ */ #endif /* _LINUX_THREAD_INFO_H */
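/*
 * Editor's example (not part of the header above): the wrappers reduce the
 * open-coded bit operations on thread_info->flags to one-liners. The helper
 * below mirrors what signal_pending() does for TIF_SIGPENDING; the name is
 * hypothetical.
 */
static inline bool example_signal_pending(struct task_struct *tsk)
{
	return test_ti_thread_flag(task_thread_info(tsk), TIF_SIGPENDING);
}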
// SPDX-License-Identifier: GPL-2.0 /* * Block rq-qos policy for assigning an I/O priority class to requests. * * Using an rq-qos policy for assigning I/O priority class has two advantages * over using the ioprio_set() system call: * * - This policy is cgroup based so it has all the advantages of cgroups. * - While ioprio_set() does not affect page cache writeback I/O, this rq-qos * controller affects page cache writeback I/O for filesystems that support * associating a cgroup with writeback I/O. See also * Documentation/admin-guide/cgroup-v2.rst. */ #include <linux/blk-mq.h> #include <linux/blk_types.h> #include <linux/kernel.h> #include <linux/module.h> #include "blk-cgroup.h" #include "blk-ioprio.h" #include "blk-rq-qos.h" /** * enum prio_policy - I/O priority class policy. * @POLICY_NO_CHANGE: (default) do not modify the I/O priority class. * @POLICY_PROMOTE_TO_RT: modify no-IOPRIO_CLASS_RT to IOPRIO_CLASS_RT. * @POLICY_RESTRICT_TO_BE: modify IOPRIO_CLASS_NONE and IOPRIO_CLASS_RT into * IOPRIO_CLASS_BE. * @POLICY_ALL_TO_IDLE: change the I/O priority class into IOPRIO_CLASS_IDLE. * @POLICY_NONE_TO_RT: an alias for POLICY_PROMOTE_TO_RT. * * See also <linux/ioprio.h>. */ enum prio_policy { POLICY_NO_CHANGE = 0, POLICY_PROMOTE_TO_RT = 1, POLICY_RESTRICT_TO_BE = 2, POLICY_ALL_TO_IDLE = 3, POLICY_NONE_TO_RT = 4, }; static const char *policy_name[] = { [POLICY_NO_CHANGE] = "no-change", [POLICY_PROMOTE_TO_RT] = "promote-to-rt", [POLICY_RESTRICT_TO_BE] = "restrict-to-be", [POLICY_ALL_TO_IDLE] = "idle", [POLICY_NONE_TO_RT] = "none-to-rt", }; static struct blkcg_policy ioprio_policy; /** * struct ioprio_blkcg - Per cgroup data. * @cpd: blkcg_policy_data structure. * @prio_policy: One of the POLICY_* values above. */ struct ioprio_blkcg { struct blkcg_policy_data cpd; enum prio_policy prio_policy; }; static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg) { return container_of(blkcg_to_cpd(blkcg, &ioprio_policy), struct ioprio_blkcg, cpd); } static struct ioprio_blkcg * ioprio_blkcg_from_css(struct cgroup_subsys_state *css) { return blkcg_to_ioprio_blkcg(css_to_blkcg(css)); } static int ioprio_show_prio_policy(struct seq_file *sf, void *v) { struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf)); seq_printf(sf, "%s\n", policy_name[blkcg->prio_policy]); return 0; } static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(of_css(of)); int ret; if (off != 0) return -EIO; /* kernfs_fop_write_iter() terminates 'buf' with '\0'.
*/ ret = sysfs_match_string(policy_name, buf); if (ret < 0) return ret; blkcg->prio_policy = ret; return nbytes; } static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp) { struct ioprio_blkcg *blkcg; blkcg = kzalloc(sizeof(*blkcg), gfp); if (!blkcg) return NULL; blkcg->prio_policy = POLICY_NO_CHANGE; return &blkcg->cpd; } static void ioprio_free_cpd(struct blkcg_policy_data *cpd) { struct ioprio_blkcg *blkcg = container_of(cpd, typeof(*blkcg), cpd); kfree(blkcg); } static struct cftype ioprio_files[] = { { .name = "prio.class", .seq_show = ioprio_show_prio_policy, .write = ioprio_set_prio_policy, }, { } /* sentinel */ }; static struct blkcg_policy ioprio_policy = { .dfl_cftypes = ioprio_files, .legacy_cftypes = ioprio_files, .cpd_alloc_fn = ioprio_alloc_cpd, .cpd_free_fn = ioprio_free_cpd, }; void blkcg_set_ioprio(struct bio *bio) { struct ioprio_blkcg *blkcg = blkcg_to_ioprio_blkcg(bio->bi_blkg->blkcg); u16 prio; if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE) return; if (blkcg->prio_policy == POLICY_PROMOTE_TO_RT || blkcg->prio_policy == POLICY_NONE_TO_RT) { /* * For RT threads, the default priority level is 4 because * task_nice is 0. By promoting non-RT io-priority to RT-class * and default level 4, those requests that are already * RT-class but need a higher io-priority can use ioprio_set() * to achieve this. */ if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) != IOPRIO_CLASS_RT) bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4); return; } /* * Except for IOPRIO_CLASS_NONE, higher I/O priority numbers * correspond to a lower priority. Hence, the max_t() below selects * the lower priority of bi_ioprio and the cgroup I/O priority class. * If the bio I/O priority equals IOPRIO_CLASS_NONE, the cgroup I/O * priority is assigned to the bio. */ prio = max_t(u16, bio->bi_ioprio, IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0)); if (prio > bio->bi_ioprio) bio->bi_ioprio = prio; } static int __init ioprio_init(void) { return blkcg_policy_register(&ioprio_policy); } static void __exit ioprio_exit(void) { blkcg_policy_unregister(&ioprio_policy); } module_init(ioprio_init); module_exit(ioprio_exit);
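/*
 * Editor's example (not part of the file above): why the max_t() in
 * blkcg_set_ioprio() picks the more restrictive priority. An ioprio value
 * packs the class into the upper bits (IOPRIO_CLASS_SHIFT) and the level
 * into the lower bits, so an unsigned compare orders by class first, and a
 * numerically larger class (NONE=0 < RT=1 < BE=2 < IDLE=3) is the lower
 * priority. The helper name is hypothetical.
 */
static u16 example_restrict_prio(u16 bio_prio, enum prio_policy policy)
{
	/* Returns bio_prio unchanged unless the cgroup policy is stricter. */
	return max_t(u16, bio_prio, IOPRIO_PRIO_VALUE(policy, 0));
}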
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NODEMASK_H #define __LINUX_NODEMASK_H /* * Nodemasks provide a bitmap suitable for representing the * set of Nodes in a system, one bit position per Node number. * * See detailed comments in the file linux/bitmap.h describing the * data type on which these nodemasks are based. * * For details of nodemask_parse_user(), see bitmap_parse_user() in * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(), * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in * lib/bitmap.c. * * The available nodemask operations are: * * void node_set(node, mask) turn on bit 'node' in mask * void node_clear(node, mask) turn off bit 'node' in mask * void nodes_setall(mask) set all bits * void nodes_clear(mask) clear all bits * int node_isset(node, mask) true iff bit 'node' set in mask * int node_test_and_set(node, mask) test and set bit 'node' in mask * * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection] * void nodes_or(dst, src1, src2) dst = src1 | src2 [union] * void nodes_xor(dst, src1, src2) dst = src1 ^ src2 * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2 * void nodes_complement(dst, src) dst = ~src * * int nodes_equal(mask1, mask2) Does mask1 == mask2?
* int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect? * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2? * int nodes_empty(mask) Is mask empty (no bits set)? * int nodes_full(mask) Is mask full (all bits set)? * int nodes_weight(mask) Hamming weight - number of set bits * * unsigned int first_node(mask) Number of the lowest set bit, or MAX_NUMNODES * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first, * or MAX_NUMNODES * unsigned int first_unset_node(mask) First node not set in mask, or * MAX_NUMNODES * * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set * NODE_MASK_ALL Initializer - all bits set * NODE_MASK_NONE Initializer - no bits set * unsigned long *nodes_addr(mask) Array of unsigned long's in mask * * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask * int nodelist_parse(buf, map) Parse ascii string as nodelist * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz * * for_each_node_mask(node, mask) for-loop node over mask * * int num_online_nodes() Number of online Nodes * int num_possible_nodes() Number of all possible Nodes * * int node_random(mask) Random node with set bit in mask * * int node_online(node) Is some node online? * int node_possible(node) Is some node possible? * * node_set_online(node) set bit 'node' in node_online_map * node_set_offline(node) clear bit 'node' in node_online_map * * for_each_node(node) for-loop node over node_possible_map * for_each_online_node(node) for-loop node over node_online_map * * Subtlety: * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway) * to generate slightly worse code. So use a simple one-line #define * for node_isset(), instead of wrapping an inline inside a macro, the * way we do the other calls. * * NODEMASK_SCRATCH * When doing above logical AND, OR, XOR, Remap operations the callers tend to * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper * for such situations. See below and CPUMASK_ALLOC also. */ #include <linux/threads.h> #include <linux/bitmap.h> #include <linux/minmax.h> #include <linux/nodemask_types.h> #include <linux/random.h> extern nodemask_t _unused_nodemask_arg_; /** * nodemask_pr_args - printf args to output a nodemask * @maskp: nodemask to be printed * * Can be used to provide arguments for '%*pb[l]' when printing a nodemask. */ #define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \ __nodemask_pr_bits(maskp) static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m) { return m ? MAX_NUMNODES : 0; } static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m) { return m ? m->bits : NULL; } /* * The inline keyword gives the compiler room to decide to inline, or * not inline a function as it sees best. However, as these functions * are called in both __init and non-__init functions, if they are not * inlined we will end up with a section mismatch error (of the type of * freeable items not being freed). So we must use __always_inline here * to fix the problem.
If other functions in the future also end up in * this situation they will also need to be annotated as __always_inline */ #define node_set(node, dst) __node_set((node), &(dst)) static __always_inline void __node_set(int node, volatile nodemask_t *dstp) { set_bit(node, dstp->bits); } #define node_clear(node, dst) __node_clear((node), &(dst)) static __always_inline void __node_clear(int node, volatile nodemask_t *dstp) { clear_bit(node, dstp->bits); } #define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES) static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits) { bitmap_fill(dstp->bits, nbits); } #define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES) static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits) { bitmap_zero(dstp->bits, nbits); } /* No static inline type checking - see Subtlety (1) above. */ #define node_isset(node, nodemask) test_bit((node), (nodemask).bits) #define node_test_and_set(node, nodemask) \ __node_test_and_set((node), &(nodemask)) static __always_inline bool __node_test_and_set(int node, nodemask_t *addr) { return test_and_set_bit(node, addr->bits); } #define nodes_and(dst, src1, src2) \ __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_or(dst, src1, src2) \ __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_xor(dst, src1, src2) \ __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_andnot(dst, src1, src2) \ __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES) static __always_inline void __nodes_copy(nodemask_t *dstp, const nodemask_t *srcp, unsigned int nbits) { bitmap_copy(dstp->bits, srcp->bits, nbits); } #define nodes_complement(dst, src) \ __nodes_complement(&(dst), &(src), MAX_NUMNODES) static __always_inline void __nodes_complement(nodemask_t *dstp, const nodemask_t *srcp, unsigned int nbits) { bitmap_complement(dstp->bits, srcp->bits, nbits); } #define nodes_equal(src1, src2) \ __nodes_equal(&(src1), &(src2), MAX_NUMNODES) static __always_inline bool __nodes_equal(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_equal(src1p->bits, src2p->bits, nbits); } #define nodes_intersects(src1, src2) \ __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) static __always_inline bool __nodes_intersects(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_intersects(src1p->bits, src2p->bits, nbits); } #define nodes_subset(src1, src2) \ __nodes_subset(&(src1), &(src2), MAX_NUMNODES) static __always_inline bool __nodes_subset(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_subset(src1p->bits, src2p->bits, nbits); } #define nodes_empty(src) 
__nodes_empty(&(src), MAX_NUMNODES) static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits) { return bitmap_empty(srcp->bits, nbits); } #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits) { return bitmap_full(srcp->bits, nbits); } #define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits) { return bitmap_weight(srcp->bits, nbits); } /* FIXME: better would be to fix all architectures to never return > MAX_NUMNODES, then the silly min()s could be dropped. */ #define first_node(src) __first_node(&(src)) static __always_inline unsigned int __first_node(const nodemask_t *srcp) { return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); } #define next_node(n, src) __next_node((n), &(src)) static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp) { return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } /* * Find the next present node in src, starting after node n, wrapping around to * the first node in src if needed. Returns MAX_NUMNODES if src is empty. */ #define next_node_in(n, src) __next_node_in((n), &(src)) static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp) { unsigned int ret = __next_node(node, srcp); if (ret == MAX_NUMNODES) ret = __first_node(srcp); return ret; } static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node) { nodes_clear(*mask); node_set(node, *mask); } #define nodemask_of_node(node) \ ({ \ typeof(_unused_nodemask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ m.bits[0] = 1UL << (node); \ } else { \ init_nodemask_of_node(&m, (node)); \ } \ m; \ }) #define first_unset_node(mask) __first_unset_node(&(mask)) static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp) { return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES)); } #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) #if MAX_NUMNODES <= BITS_PER_LONG #define NODE_MASK_ALL \ ((nodemask_t) { { \ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ } }) #else #define NODE_MASK_ALL \ ((nodemask_t) { { \ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ } }) #endif #define NODE_MASK_NONE \ ((nodemask_t) { { \ [0 ... 
BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \ } }) #define nodes_addr(src) ((src).bits) #define nodemask_parse_user(ubuf, ulen, dst) \ __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) static __always_inline int __nodemask_parse_user(const char __user *buf, int len, nodemask_t *dstp, int nbits) { return bitmap_parse_user(buf, len, dstp->bits, nbits); } #define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) { return bitmap_parselist(buf, dstp->bits, nbits); } #define node_remap(oldbit, old, new) \ __node_remap((oldbit), &(old), &(new), MAX_NUMNODES) static __always_inline int __node_remap(int oldbit, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); } #define nodes_remap(dst, src, old, new) \ __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } #define nodes_onto(dst, orig, relmap) \ __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, const nodemask_t *relmapp, int nbits) { bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); } #define nodes_fold(dst, orig, sz) \ __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, int sz, int nbits) { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } #if MAX_NUMNODES > 1 #define for_each_node_mask(node, mask) \ for ((node) = first_node(mask); \ (node) < MAX_NUMNODES; \ (node) = next_node((node), (mask))) #else /* MAX_NUMNODES == 1 */ #define for_each_node_mask(node, mask) \ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++) #endif /* MAX_NUMNODES */ /* * Bitmasks that are kept for all the nodes. */ enum node_states { N_POSSIBLE, /* The node could become online at some point */ N_ONLINE, /* The node is online */ N_NORMAL_MEMORY, /* The node has regular memory */ #ifdef CONFIG_HIGHMEM N_HIGH_MEMORY, /* The node has regular or high memory */ #else N_HIGH_MEMORY = N_NORMAL_MEMORY, #endif N_MEMORY, /* The node has memory(regular, high, movable) */ N_CPU, /* The node has one or more cpus */ N_GENERIC_INITIATOR, /* The node has one or more Generic Initiators */ NR_NODE_STATES }; /* * The following particular system nodemasks and operations * on them manage all possible and online nodes. 
*/ extern nodemask_t node_states[NR_NODE_STATES]; #if MAX_NUMNODES > 1 static __always_inline int node_state(int node, enum node_states state) { return node_isset(node, node_states[state]); } static __always_inline void node_set_state(int node, enum node_states state) { __node_set(node, &node_states[state]); } static __always_inline void node_clear_state(int node, enum node_states state) { __node_clear(node, &node_states[state]); } static __always_inline int num_node_state(enum node_states state) { return nodes_weight(node_states[state]); } #define for_each_node_state(__node, __state) \ for_each_node_mask((__node), node_states[__state]) #define first_online_node first_node(node_states[N_ONLINE]) #define first_memory_node first_node(node_states[N_MEMORY]) static __always_inline unsigned int next_online_node(int nid) { return next_node(nid, node_states[N_ONLINE]); } static __always_inline unsigned int next_memory_node(int nid) { return next_node(nid, node_states[N_MEMORY]); } extern unsigned int nr_node_ids; extern unsigned int nr_online_nodes; static __always_inline void node_set_online(int nid) { node_set_state(nid, N_ONLINE); nr_online_nodes = num_node_state(N_ONLINE); } static __always_inline void node_set_offline(int nid) { node_clear_state(nid, N_ONLINE); nr_online_nodes = num_node_state(N_ONLINE); } #else static __always_inline int node_state(int node, enum node_states state) { return node == 0; } static __always_inline void node_set_state(int node, enum node_states state) { } static __always_inline void node_clear_state(int node, enum node_states state) { } static __always_inline int num_node_state(enum node_states state) { return 1; } #define for_each_node_state(node, __state) \ for ( (node) = 0; (node) == 0; (node) = 1) #define first_online_node 0 #define first_memory_node 0 #define next_online_node(nid) (MAX_NUMNODES) #define next_memory_node(nid) (MAX_NUMNODES) #define nr_node_ids 1U #define nr_online_nodes 1U #define node_set_online(node) node_set_state((node), N_ONLINE) #define node_set_offline(node) node_clear_state((node), N_ONLINE) #endif static __always_inline int node_random(const nodemask_t *maskp) { #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) int node = find_random_bit(maskp->bits, MAX_NUMNODES); return node < MAX_NUMNODES ? node : NUMA_NO_NODE; #else return 0; #endif } #define node_online_map node_states[N_ONLINE] #define node_possible_map node_states[N_POSSIBLE] #define num_online_nodes() num_node_state(N_ONLINE) #define num_possible_nodes() num_node_state(N_POSSIBLE) #define node_online(node) node_state((node), N_ONLINE) #define node_possible(node) node_state((node), N_POSSIBLE) #define for_each_node(node) for_each_node_state(node, N_POSSIBLE) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) #define for_each_node_with_cpus(node) for_each_node_state(node, N_CPU) /* * For nodemask scratch area. * NODEMASK_ALLOC(type, name) allocates an object with a specified type and * name. */ #if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */ #define NODEMASK_ALLOC(type, name, gfp_flags) \ type *name = kmalloc(sizeof(*name), gfp_flags) #define NODEMASK_FREE(m) kfree(m) #else #define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name #define NODEMASK_FREE(m) do {} while (0) #endif /* Example structure for using NODEMASK_ALLOC, used in mempolicy. 
*/ struct nodemask_scratch { nodemask_t mask1; nodemask_t mask2; }; #define NODEMASK_SCRATCH(x) \ NODEMASK_ALLOC(struct nodemask_scratch, x, \ GFP_KERNEL | __GFP_NORETRY) #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) #endif /* __LINUX_NODEMASK_H */ |
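The operation table at the top of this header is easiest to digest next to a short caller. The sketch below is illustrative only: the function name and the log message are hypothetical and not part of nodemask.h, and it assumes a normal in-kernel compilation unit where pr_info() and node_online_map are available.

/*
 * Minimal usage sketch (hypothetical helper, not part of the header):
 * build a requested mask, intersect it with the online map, then walk
 * the result with the for_each_node_mask() iterator.
 */
static void nodemask_usage_sketch(void)
{
	nodemask_t wanted = NODE_MASK_NONE;
	nodemask_t usable;
	int nid;

	node_set(0, wanted);		/* request node 0 ... */
	node_set(1, wanted);		/* ... and node 1 */

	/* keep only the requested nodes that are actually online */
	nodes_and(usable, wanted, node_online_map);

	if (nodes_empty(usable))
		return;

	/* nid iterates over the set bits of the resulting mask */
	for_each_node_mask(nid, usable)
		pr_info("node %d selected (weight=%d)\n",
			nid, nodes_weight(usable));
}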
// SPDX-License-Identifier: GPL-2.0
/*
 * HID driver for Maltron L90
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2008 Jiri Slaby
 * Copyright (c) 2012 David Dillow <dave@thedillows.org>
 * Copyright (c) 2006-2013 Jiri Kosina
 * Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
 * Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
 * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
 * Copyright (c) 2018 William Whistler <wtbw@wtbw.co.uk>
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* The original buggy USB descriptor */
static const u8 maltron_rdesc_o[] = {
	0x05, 0x01,       /* Usage Page (Generic Desktop Ctrls) */
	0x09, 0x80,       /* Usage (Sys Control) */
	0xA1, 0x01,       /* Collection (Application) */
	0x85, 0x02,       /* Report ID (2) */
	0x75, 0x01,       /* Report Size (1) */
	0x95, 0x01,       /* Report Count (1) */
	0x15, 0x00,       /* Logical Minimum (0) */
	0x25, 0x01,       /* Logical Maximum (1) */
	0x09, 0x82,       /* Usage (Sys Sleep) */
	0x81, 0x06,       /* Input (Data,Var,Rel) */
	0x09, 0x82,       /* Usage (Sys Sleep) */
	0x81, 0x06,       /* Input (Data,Var,Rel) */
	0x09, 0x83,       /* Usage (Sys Wake Up) */
	0x81, 0x06,       /* Input (Data,Var,Rel) */
	0x75, 0x05,       /* Report Size (5) */
	0x81, 0x01,       /* Input (Const,Array,Abs) */
	0xC0,             /* End Collection */
	0x05, 0x0C,       /* Usage Page (Consumer) */
	0x09, 0x01,       /* Usage (Consumer Control) */
	0xA1, 0x01,       /* Collection (Application) */
	0x85, 0x03,       /* Report ID (3) */
	0x95, 0x01,       /* Report Count (1) */
	0x75, 0x10,       /* Report Size (16) */
	0x19, 0x00,       /* Usage Minimum (Unassigned) */
	0x2A, 0xFF, 0x7F, /* Usage Maximum (0x7FFF) */
	0x81, 0x00,       /* Input (Data,Array,Abs) */
	0xC0,             /* End Collection */
	0x06, 0x7F, 0xFF, /* Usage Page (Vendor Defined 0xFF7F) */
	0x09, 0x01,       /* Usage (0x01) */
	0xA1, 0x01,       /* Collection (Application) */
	0x85, 0x04,       /* Report ID (4) */
	0x95, 0x01,       /* Report Count (1) */
	0x75, 0x10,       /* Report Size (16) */
	0x19, 0x00,       /* Usage Minimum (0x00) */
	0x2A, 0xFF, 0x7F, /* Usage Maximum (0x7FFF) */
	0x81, 0x00,       /* Input (Data,Array,Abs) */
	0x75, 0x02,       /* Report Size (2) */
	0x25, 0x02,       /* Logical Maximum (2) */
	0x09, 0x90,       /* Usage (0x90) */
	0xB1, 0x02,       /* Feature (Data,Var,Abs) */
	0x75, 0x06,       /* Report Size (6) */
	0xB1, 0x01,       /* Feature (Const,Array,Abs) */
	0x75, 0x01,       /* Report Size (1) */
	0x25, 0x01,       /* Logical Maximum (1) */
	0x05, 0x08,       /* Usage Page (LEDs) */
	0x09, 0x2A,       /* Usage (On-Line) */
	0x91, 0x02,       /* Output (Data,Var,Abs) */
	0x09, 0x4B,       /* Usage (Generic Indicator) */
	0x91, 0x02,       /* Output (Data,Var,Abs) */
	0x75, 0x06,       /* Report Size (6) */
	0x95, 0x01,       /* Report Count (1) */
	0x91, 0x01,       /* Output (Const,Array,Abs) */
	0xC0              /* End Collection */
};

/* The patched descriptor, allowing media key events to be accepted as valid */
static const u8 maltron_rdesc[] = {
	0x05, 0x01,
/* Usage Page (Generic Desktop Ctrls) */ 0x09, 0x80, /* Usage (Sys Control) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x02, /* Report ID (2) */ 0x75, 0x01, /* Report Size (1) */ 0x95, 0x01, /* Report Count (1) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x25, 0x01, /* Logical Maximum (1) */ 0x09, 0x82, /* Usage (Sys Sleep) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x09, 0x82, /* Usage (Sys Sleep) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x09, 0x83, /* Usage (Sys Wake Up) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x75, 0x05, /* Report Size (5) */ 0x81, 0x01, /* Input (Const,Array,Abs) */ 0xC0, /* End Collection */ 0x05, 0x0C, /* Usage Page (Consumer) */ 0x09, 0x01, /* Usage (Consumer Control) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x03, /* Report ID (3) */ 0x15, 0x00, /* Logical Minimum (0) - changed */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767) - changed */ 0x95, 0x01, /* Report Count (1) */ 0x75, 0x10, /* Report Size (16) */ 0x19, 0x00, /* Usage Minimum (Unassigned) */ 0x2A, 0xFF, 0x7F, /* Usage Maximum (0x7FFF) */ 0x81, 0x00, /* Input (Data,Array,Abs) */ 0xC0, /* End Collection */ 0x06, 0x7F, 0xFF, /* Usage Page (Vendor Defined 0xFF7F) */ 0x09, 0x01, /* Usage (0x01) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x04, /* Report ID (4) */ 0x95, 0x01, /* Report Count (1) */ 0x75, 0x10, /* Report Size (16) */ 0x19, 0x00, /* Usage Minimum (0x00) */ 0x2A, 0xFF, 0x7F, /* Usage Maximum (0x7FFF) */ 0x81, 0x00, /* Input (Data,Array,Abs) */ 0x75, 0x02, /* Report Size (2) */ 0x25, 0x02, /* Logical Maximum (2) */ 0x09, 0x90, /* Usage (0x90) */ 0xB1, 0x02, /* Feature (Data,Var,Abs) */ 0x75, 0x06, /* Report Size (6) */ 0xB1, 0x01, /* Feature (Const,Array,Abs) */ 0x75, 0x01, /* Report Size (1) */ 0x25, 0x01, /* Logical Maximum (1) */ 0x05, 0x08, /* Usage Page (LEDs) */ 0x09, 0x2A, /* Usage (On-Line) */ 0x91, 0x02, /* Output (Data,Var,Abs) */ 0x09, 0x4B, /* Usage (Generic Indicator) */ 0x91, 0x02, /* Output (Data,Var,Abs) */ 0x75, 0x06, /* Report Size (6) */ 0x95, 0x01, /* Report Count (1) */ 0x91, 0x01, /* Output (Const,Array,Abs) */ 0xC0 /* End Collection */ }; static const __u8 *maltron_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == sizeof(maltron_rdesc_o) && !memcmp(maltron_rdesc_o, rdesc, sizeof(maltron_rdesc_o))) { hid_info(hdev, "Replacing Maltron L90 keyboard report descriptor\n"); *rsize = sizeof(maltron_rdesc); return maltron_rdesc; } return rdesc; } static const struct hid_device_id maltron_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_MALTRON_KB)}, { } }; MODULE_DEVICE_TABLE(hid, maltron_devices); static struct hid_driver maltron_driver = { .name = "maltron", .id_table = maltron_devices, .report_fixup = maltron_report_fixup }; module_hid_driver(maltron_driver); MODULE_DESCRIPTION("HID driver for Maltron L90"); MODULE_LICENSE("GPL"); |
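The fixup above works by inserting explicit Logical Minimum/Maximum items into the Consumer Control collection (report ID 3), so consumer usages above 1 are no longer dropped. A quick way to see exactly which bytes changed is to decode both arrays item by item; the standalone userspace helper below is a hypothetical sketch for that purpose. It is not part of the driver, and it ignores the rarely used HID long-item encoding.

#include <stdio.h>
#include <stddef.h>

/* Decode the short items of a HID report descriptor: the low two bits of the
 * prefix byte give the payload length (0, 1, 2 or 4 bytes), the remaining bits
 * identify the item. Running this over maltron_rdesc_o and maltron_rdesc shows
 * the two added items in report 3: Logical Minimum (0x15 0x00) and Logical
 * Maximum (0x26 0xFF 0x7F). */
static void dump_hid_items(const unsigned char *desc, size_t len)
{
	static const size_t payload[4] = { 0, 1, 2, 4 };
	size_t i = 0;

	while (i < len) {
		size_t n = payload[desc[i] & 0x03];
		size_t j;

		printf("%3zu: prefix 0x%02x data", i, desc[i] & 0xfcu);
		for (j = 0; j < n && i + 1 + j < len; j++)
			printf(" 0x%02x", (unsigned int)desc[i + 1 + j]);
		printf("\n");
		i += 1 + n;
	}
}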
/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef _LINUX_RESERVATION_H #define _LINUX_RESERVATION_H #include <linux/ww_mutex.h> #include <linux/dma-fence.h> #include <linux/slab.h> #include <linux/seqlock.h> #include <linux/rcupdate.h> extern struct ww_class reservation_ww_class; struct dma_resv_list; /** * enum dma_resv_usage - how the fences from a dma_resv obj are used * * This enum describes the different use cases for a dma_resv object and * controls which fences are returned when queried. * * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and * when the dma_resv object is asked for fences for one use case the fences * for the lower use case are returned as well. * * For example when asking for WRITE fences then the KERNEL fences are returned * as well. Similar when asked for READ fences then both WRITE and KERNEL * fences are returned as well. * * Already used fences can be promoted in the sense that a fence with * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again * with this usage. But fences can never be degraded in the sense that a fence * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ. */ enum dma_resv_usage { /** * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only. * * This should only be used for things like copying or clearing memory * with a DMA hardware engine for the purpose of kernel memory * management. * * Drivers *always* must wait for those fences before accessing the * resource protected by the dma_resv object. The only exception for * that is when the resource is known to be locked down in place by * pinning it previously. */ DMA_RESV_USAGE_KERNEL, /** * @DMA_RESV_USAGE_WRITE: Implicit write synchronization. * * This should only be used for userspace command submissions which add * an implicit write dependency. */ DMA_RESV_USAGE_WRITE, /** * @DMA_RESV_USAGE_READ: Implicit read synchronization. * * This should only be used for userspace command submissions which add * an implicit read dependency. */ DMA_RESV_USAGE_READ, /** * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync. * * This should be used by submissions which don't want to participate in * any implicit synchronization. * * The most common cases are preemption fences, page table updates, TLB * flushes as well as explicitly synced user submissions. * * Explicitly synced user submissions can be promoted to * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using * dma_buf_import_sync_file() when implicit synchronization should * become necessary after initial adding of the fence. */ DMA_RESV_USAGE_BOOKKEEP }; /** * dma_resv_usage_rw - helper for implicit sync * @write: true if we create a new implicit sync write * * This returns the implicit synchronization usage for write or read accesses, * see enum dma_resv_usage and &dma_buf.resv. */ static inline enum dma_resv_usage dma_resv_usage_rw(bool write) { /* This looks confusing at first sight, but is indeed correct. * * The rational is that new write operations needs to wait for the * existing read and write operations to finish. * But a new read operation only needs to wait for the existing write * operations to finish. */ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE; } /** * struct dma_resv - a reservation object manages fences for a buffer * * This is a container for dma_fence objects which needs to handle multiple use * cases. 
* * One use is to synchronize cross-driver access to a struct dma_buf, either for * dynamic buffer management or just to handle implicit synchronization between * different users of the buffer in userspace. See &dma_buf.resv for a more * in-depth discussion. * * The other major use is to manage access and locking within a driver in a * buffer based memory manager. struct ttm_buffer_object is the canonical * example here, since this is where reservation objects originated from. But * use in drivers is spreading and some drivers also manage struct * drm_gem_object with the same scheme. */ struct dma_resv { /** * @lock: * * Update side lock. Don't use directly, instead use the wrapper * functions like dma_resv_lock() and dma_resv_unlock(). * * Drivers which use the reservation object to manage memory dynamically * also use this lock to protect buffer object state like placement, * allocation policies or throughout command submission. */ struct ww_mutex lock; /** * @fences: * * Array of fences which where added to the dma_resv object * * A new fence is added by calling dma_resv_add_fence(). Since this * often needs to be done past the point of no return in command * submission it cannot fail, and therefore sufficient slots need to be * reserved by calling dma_resv_reserve_fences(). */ struct dma_resv_list __rcu *fences; }; /** * struct dma_resv_iter - current position into the dma_resv fences * * Don't touch this directly in the driver, use the accessor function instead. * * IMPORTANT * * When using the lockless iterators like dma_resv_iter_next_unlocked() or * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted. * Code which accumulates statistics or similar needs to check for this with * dma_resv_iter_is_restarted(). */ struct dma_resv_iter { /** @obj: The dma_resv object we iterate over */ struct dma_resv *obj; /** @usage: Return fences with this usage or lower. */ enum dma_resv_usage usage; /** @fence: the currently handled fence */ struct dma_fence *fence; /** @fence_usage: the usage of the current fence */ enum dma_resv_usage fence_usage; /** @index: index into the shared fences */ unsigned int index; /** @fences: the shared fences; private, *MUST* not dereference */ struct dma_resv_list *fences; /** @num_fences: number of fences */ unsigned int num_fences; /** @is_restarted: true if this is the first returned fence */ bool is_restarted; }; struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor); struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor); struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor); struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor); /** * dma_resv_iter_begin - initialize a dma_resv_iter object * @cursor: The dma_resv_iter object to initialize * @obj: The dma_resv object which we want to iterate over * @usage: controls which fences to include, see enum dma_resv_usage. */ static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor, struct dma_resv *obj, enum dma_resv_usage usage) { cursor->obj = obj; cursor->usage = usage; cursor->fence = NULL; } /** * dma_resv_iter_end - cleanup a dma_resv_iter object * @cursor: the dma_resv_iter object which should be cleaned up * * Make sure that the reference to the fence in the cursor is properly * dropped. 
*/ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor) { dma_fence_put(cursor->fence); } /** * dma_resv_iter_usage - Return the usage of the current fence * @cursor: the cursor of the current position * * Returns the usage of the currently processed fence. */ static inline enum dma_resv_usage dma_resv_iter_usage(struct dma_resv_iter *cursor) { return cursor->fence_usage; } /** * dma_resv_iter_is_restarted - test if this is the first fence after a restart * @cursor: the cursor with the current position * * Return true if this is the first fence in an iteration after a restart. */ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) { return cursor->is_restarted; } /** * dma_resv_for_each_fence_unlocked - unlocked fence iterator * @cursor: a struct dma_resv_iter pointer * @fence: the current fence * * Iterate over the fences in a struct dma_resv object without holding the * &dma_resv.lock and using RCU instead. The cursor needs to be initialized * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside * the iterator a reference to the dma_fence is held and the RCU lock dropped. * * Beware that the iterator can be restarted when the struct dma_resv for * @cursor is modified. Code which accumulates statistics or similar needs to * check for this with dma_resv_iter_is_restarted(). For this reason prefer the * lock iterator dma_resv_for_each_fence() whenever possible. */ #define dma_resv_for_each_fence_unlocked(cursor, fence) \ for (fence = dma_resv_iter_first_unlocked(cursor); \ fence; fence = dma_resv_iter_next_unlocked(cursor)) /** * dma_resv_for_each_fence - fence iterator * @cursor: a struct dma_resv_iter pointer * @obj: a dma_resv object pointer * @usage: controls which fences to return * @fence: the current fence * * Iterate over the fences in a struct dma_resv object while holding the * &dma_resv.lock. @all_fences controls if the shared fences are returned as * well. The cursor initialisation is part of the iterator and the fence stays * valid as long as the lock is held and so no extra reference to the fence is * taken. */ #define dma_resv_for_each_fence(cursor, obj, usage, fence) \ for (dma_resv_iter_begin(cursor, obj, usage), \ fence = dma_resv_iter_first(cursor); fence; \ fence = dma_resv_iter_next(cursor)) #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) #ifdef CONFIG_DEBUG_MUTEXES void dma_resv_reset_max_fences(struct dma_resv *obj); #else static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {} #endif /** * dma_resv_lock - lock the reservation object * @obj: the reservation object * @ctx: the locking context * * Locks the reservation object for exclusive access and modification. Note, * that the lock is only against other writers, readers will run concurrently * with a writer under RCU. The seqlock is used to notify readers if they * overlap with a writer. * * As the reservation object may be locked by multiple parties in an * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. * * When a die situation is indicated by returning -EDEADLK all locks held by * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj. * * Unlocked by calling dma_resv_unlock(). * * See also dma_resv_lock_interruptible() for the interruptible variant. 
*/ static inline int dma_resv_lock(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { return ww_mutex_lock(&obj->lock, ctx); } /** * dma_resv_lock_interruptible - lock the reservation object * @obj: the reservation object * @ctx: the locking context * * Locks the reservation object interruptible for exclusive access and * modification. Note, that the lock is only against other writers, readers * will run concurrently with a writer under RCU. The seqlock is used to * notify readers if they overlap with a writer. * * As the reservation object may be locked by multiple parties in an * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. * * When a die situation is indicated by returning -EDEADLK all locks held by * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on * @obj. * * Unlocked by calling dma_resv_unlock(). */ static inline int dma_resv_lock_interruptible(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { return ww_mutex_lock_interruptible(&obj->lock, ctx); } /** * dma_resv_lock_slow - slowpath lock the reservation object * @obj: the reservation object * @ctx: the locking context * * Acquires the reservation object after a die case. This function * will sleep until the lock becomes available. See dma_resv_lock() as * well. * * See also dma_resv_lock_slow_interruptible() for the interruptible variant. */ static inline void dma_resv_lock_slow(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { ww_mutex_lock_slow(&obj->lock, ctx); } /** * dma_resv_lock_slow_interruptible - slowpath lock the reservation * object, interruptible * @obj: the reservation object * @ctx: the locking context * * Acquires the reservation object interruptible after a die case. This function * will sleep until the lock becomes available. See * dma_resv_lock_interruptible() as well. */ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); } /** * dma_resv_trylock - trylock the reservation object * @obj: the reservation object * * Tries to lock the reservation object for exclusive access and modification. * Note, that the lock is only against other writers, readers will run * concurrently with a writer under RCU. The seqlock is used to notify readers * if they overlap with a writer. * * Also note that since no context is provided, no deadlock protection is * possible, which is also not needed for a trylock. * * Returns true if the lock was acquired, false otherwise. */ static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) { return ww_mutex_trylock(&obj->lock, NULL); } /** * dma_resv_is_locked - is the reservation object locked * @obj: the reservation object * * Returns true if the mutex is locked, false if unlocked. */ static inline bool dma_resv_is_locked(struct dma_resv *obj) { return ww_mutex_is_locked(&obj->lock); } /** * dma_resv_locking_ctx - returns the context used to lock the object * @obj: the reservation object * * Returns the context used to lock a reservation object or NULL if no context * was used or the object is not locked at all. * * WARNING: This interface is pretty horrible, but TTM needs it because it * doesn't pass the struct ww_acquire_ctx around in some very long callchains. * Everyone else just uses it to check whether they're holding a reservation or * not. 
*/ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) { return READ_ONCE(obj->lock.ctx); } /** * dma_resv_unlock - unlock the reservation object * @obj: the reservation object * * Unlocks the reservation object following exclusive access. */ static inline void dma_resv_unlock(struct dma_resv *obj) { dma_resv_reset_max_fences(obj); ww_mutex_unlock(&obj->lock); } void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, enum dma_resv_usage usage); void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, struct dma_fence *fence, enum dma_resv_usage usage); int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, struct dma_fence **fence); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, bool intr, unsigned long timeout); void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage, ktime_t deadline); bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); #endif /* _LINUX_RESERVATION_H */ |
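As a usage illustration of the API declared above, the snippet below sketches the usual lock, reserve, add-fence, iterate, unlock sequence from a hypothetical driver. Only the dma_resv_*() and dma_fence fields come from this header; the function name and log line are placeholders, error handling is kept minimal, and an in-kernel compilation unit with pr_info() is assumed.

/*
 * Illustrative sketch (not part of the header): attach a WRITE fence to a
 * reservation object and walk everything a new writer would have to wait for.
 */
static int example_attach_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;
	int ret;

	/* single object, so no ww_acquire_ctx is needed (NULL is allowed) */
	ret = dma_resv_lock(resv, NULL);
	if (ret)
		return ret;

	/* reserve a slot before the point of no return, then add the fence */
	ret = dma_resv_reserve_fences(resv, 1);
	if (!ret)
		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

	/* dma_resv_usage_rw(true): fences a new write access must wait for */
	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(true), f)
		pr_info("waiting on fence context %llu\n", f->context);

	dma_resv_unlock(resv);
	return ret;
}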
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
 *
 * Copyright (C) 2013-2025 PEAK System-Technik GmbH
 * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
 */
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/dev/peak_canfd.h>

#include "pcan_usb_core.h"
#include "pcan_usb_pro.h"

#define PCAN_USBPROFD_CHANNEL_COUNT	2
#define PCAN_USBFD_CHANNEL_COUNT	1

/* PCAN-USB Pro FD adapter internal clock (Hz) */
#define PCAN_UFD_CRYSTAL_HZ		80000000

#define PCAN_UFD_CMD_BUFFER_SIZE	512
#define PCAN_UFD_LOSPD_PKT_SIZE		64

/* PCAN-USB Pro FD command timeout (ms.)
*/ #define PCAN_UFD_CMD_TIMEOUT_MS 1000 /* PCAN-USB Pro FD rx/tx buffers size */ #define PCAN_UFD_RX_BUFFER_SIZE 2048 #define PCAN_UFD_TX_BUFFER_SIZE 512 /* struct pcan_ufd_fw_info::type */ #define PCAN_USBFD_TYPE_STD 1 #define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */ /* read some versions info from the hw device */ struct __packed pcan_ufd_fw_info { __le16 size_of; /* sizeof this */ __le16 type; /* type of this structure */ u8 hw_type; /* Type of hardware (HW_TYPE_xxx) */ u8 bl_version[3]; /* Bootloader version */ u8 hw_version; /* Hardware version (PCB) */ u8 fw_version[3]; /* Firmware version */ __le32 dev_id[2]; /* "device id" per CAN */ __le32 ser_no; /* S/N */ __le32 flags; /* special functions */ /* extended data when type >= PCAN_USBFD_TYPE_EXT */ u8 cmd_out_ep; /* ep for cmd */ u8 cmd_in_ep; /* ep for replies */ u8 data_out_ep[2]; /* ep for CANx TX */ u8 data_in_ep; /* ep for CAN RX */ u8 dummy[3]; }; /* handle device specific info used by the netdevices */ struct pcan_usb_fd_if { struct peak_usb_device *dev[PCAN_USB_MAX_CHANNEL]; struct pcan_ufd_fw_info fw_info; struct peak_time_ref time_ref; int cm_ignore_count; int dev_opened_count; }; /* device information */ struct pcan_usb_fd_device { struct peak_usb_device dev; struct can_berr_counter bec; struct pcan_usb_fd_if *usb_if; u8 *cmd_buffer_addr; }; /* Extended USB commands (non uCAN commands) */ /* Clock Modes command */ #define PCAN_UFD_CMD_CLK_SET 0x80 #define PCAN_UFD_CLK_80MHZ 0x0 #define PCAN_UFD_CLK_60MHZ 0x1 #define PCAN_UFD_CLK_40MHZ 0x2 #define PCAN_UFD_CLK_30MHZ 0x3 #define PCAN_UFD_CLK_24MHZ 0x4 #define PCAN_UFD_CLK_20MHZ 0x5 #define PCAN_UFD_CLK_DEF PCAN_UFD_CLK_80MHZ struct __packed pcan_ufd_clock { __le16 opcode_channel; u8 mode; u8 unused[5]; }; /* LED control command */ #define PCAN_UFD_CMD_LED_SET 0x86 #define PCAN_UFD_LED_DEV 0x00 #define PCAN_UFD_LED_FAST 0x01 #define PCAN_UFD_LED_SLOW 0x02 #define PCAN_UFD_LED_ON 0x03 #define PCAN_UFD_LED_OFF 0x04 #define PCAN_UFD_LED_DEF PCAN_UFD_LED_DEV struct __packed pcan_ufd_led { __le16 opcode_channel; u8 mode; u8 unused[5]; }; /* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */ #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 struct __packed pcan_ufd_options { __le16 opcode_channel; __le16 ucan_mask; u16 unused; __le16 usb_mask; }; /* Extended usage of uCAN messages for PCAN-USB Pro FD */ #define PCAN_UFD_MSG_CALIBRATION 0x100 struct __packed pcan_ufd_ts_msg { __le16 size; __le16 type; __le32 ts_low; __le32 ts_high; __le16 usb_frame_index; u16 unused; }; #define PCAN_UFD_MSG_OVERRUN 0x101 #define PCAN_UFD_OVMSG_CHANNEL(o) ((o)->channel & 0xf) struct __packed pcan_ufd_ovr_msg { __le16 size; __le16 type; __le32 ts_low; __le32 ts_high; u8 channel; u8 unused[3]; }; #define PCAN_UFD_CMD_DEVID_SET 0x81 struct __packed pcan_ufd_device_id { __le16 opcode_channel; u16 unused; __le32 device_id; }; static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om) { return om->channel & 0xf; } /* Clock mode frequency values */ static const u32 pcan_usb_fd_clk_freq[6] = { [PCAN_UFD_CLK_80MHZ] = 80000000, [PCAN_UFD_CLK_60MHZ] = 60000000, [PCAN_UFD_CLK_40MHZ] = 40000000, [PCAN_UFD_CLK_30MHZ] = 30000000, [PCAN_UFD_CLK_24MHZ] = 24000000, [PCAN_UFD_CLK_20MHZ] = 20000000 }; /* return a device USB interface */ static inline struct pcan_usb_fd_if *pcan_usb_fd_dev_if(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); return pdev->usb_if; } /* return a device USB commands buffer */ static inline void 
*pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); return pdev->cmd_buffer_addr; } /* send PCAN-USB Pro FD commands synchronously */ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info; void *cmd_head = pcan_usb_fd_cmd_buffer(dev); int err = 0; u8 *packet_ptr; int packet_len; ptrdiff_t cmd_len; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; /* if a packet is not filled completely by commands, the command list * is terminated with an "end of collection" record. */ cmd_len = cmd_tail - cmd_head; if (cmd_len <= (PCAN_UFD_CMD_BUFFER_SIZE - sizeof(u64))) { memset(cmd_tail, 0xff, sizeof(u64)); cmd_len += sizeof(u64); } packet_ptr = cmd_head; packet_len = cmd_len; /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ if (unlikely(dev->udev->speed != USB_SPEED_HIGH)) packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE); do { err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep), packet_ptr, packet_len, NULL, PCAN_UFD_CMD_TIMEOUT_MS); if (err) { netdev_err(dev->netdev, "sending command failure: %d\n", err); break; } packet_ptr += packet_len; cmd_len -= packet_len; if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE) packet_len = cmd_len; } while (packet_len > 0); return err; } static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev, struct pcan_ufd_fw_info *fw_info) { return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, fw_info, sizeof(*fw_info)); } /* build the commands list in the given buffer, to enter operational mode */ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) { struct pucan_wr_err_cnt *prc; struct pucan_command *cmd; u8 *pc = buf; /* 1st, reset error counters: */ prc = (struct pucan_wr_err_cnt *)pc; prc->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_WR_ERR_CNT); /* select both counters */ prc->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE|PUCAN_WRERRCNT_RE); /* and reset their values */ prc->tx_counter = 0; prc->rx_counter = 0; /* moves the pointer forward */ pc += sizeof(struct pucan_wr_err_cnt); /* add command to switch from ISO to non-ISO mode, if fw allows it */ if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) { struct pucan_options *puo = (struct pucan_options *)pc; puo->opcode_channel = (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ? pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_CLR_DIS_OPTION) : pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_SET_EN_OPTION); puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO); /* to be sure that no other extended bits will be taken into * account */ puo->unused = 0; /* moves the pointer forward */ pc += sizeof(struct pucan_options); } /* next, go back to operational mode */ cmd = (struct pucan_command *)pc; cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, (dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ? 
PUCAN_CMD_LISTEN_ONLY_MODE : PUCAN_CMD_NORMAL_MODE); pc += sizeof(struct pucan_command); return pc - buf; } /* set CAN bus on/off */ static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff) { u8 *pc = pcan_usb_fd_cmd_buffer(dev); int l; if (onoff) { /* build the cmds list to enter operational mode */ l = pcan_usb_fd_build_restart_cmd(dev, pc); } else { struct pucan_command *cmd = (struct pucan_command *)pc; /* build cmd to go back to reset mode */ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_RESET_MODE); l = sizeof(struct pucan_command); } /* send the command */ return pcan_usb_fd_send_cmd(dev, pc + l); } /* set filtering masks: * * idx in range [0..63] selects a row #idx, all rows otherwise * mask in range [0..0xffffffff] defines up to 32 CANIDs in the row(s) * * Each bit of this 64 x 32 bits array defines a CANID value: * * bit[i,j] = 1 implies that CANID=(i x 32)+j will be received, while * bit[i,j] = 0 implies that CANID=(i x 32)+j will be discarded. */ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx, u32 mask) { struct pucan_filter_std *cmd = pcan_usb_fd_cmd_buffer(dev); int i, n; /* select all rows when idx is out of range [0..63] */ if ((idx < 0) || (idx >= (1 << PUCAN_FLTSTD_ROW_IDX_BITS))) { n = 1 << PUCAN_FLTSTD_ROW_IDX_BITS; idx = 0; /* select the row (and only the row) otherwise */ } else { n = idx + 1; } for (i = idx; i < n; i++, cmd++) { cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_FILTER_STD); cmd->idx = cpu_to_le16(i); cmd->mask = cpu_to_le32(mask); } /* send the command */ return pcan_usb_fd_send_cmd(dev, cmd); } /* set/unset options * * onoff set(1)/unset(0) options * mask each bit defines a kind of options to set/unset */ static int pcan_usb_fd_set_options(struct peak_usb_device *dev, bool onoff, u16 ucan_mask, u16 usb_mask) { struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, (onoff) ? 
PUCAN_CMD_SET_EN_OPTION : PUCAN_CMD_CLR_DIS_OPTION); cmd->ucan_mask = cpu_to_le16(ucan_mask); cmd->usb_mask = cpu_to_le16(usb_mask); /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* setup LED control */ static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode) { struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_LED_SET); cmd->mode = led_mode; /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* set CAN clock domain */ static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev, u8 clk_mode) { struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_CLK_SET); cmd->mode = clk_mode; /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* set bittiming for CAN and CAN-FD header */ static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev, struct can_bittiming *bt) { struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_TIMING_SLOW); cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1, dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); cmd->tseg2 = PUCAN_TSLOW_TSEG2(bt->phase_seg2 - 1); cmd->tseg1 = PUCAN_TSLOW_TSEG1(bt->prop_seg + bt->phase_seg1 - 1); cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(bt->brp - 1)); cmd->ewl = 96; /* default */ /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* set CAN-FD bittiming for data */ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev, struct can_bittiming *bt) { struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_TIMING_FAST); cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1); cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1); cmd->tseg1 = PUCAN_TFAST_TSEG1(bt->prop_seg + bt->phase_seg1 - 1); cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(bt->brp - 1)); /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* read user CAN channel id from device */ static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id) { int err; struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev); err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info); if (err) return err; *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]); return err; } /* set a new CAN channel id in the flash memory of the device */ static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) { struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_DEVID_SET); cmd->device_id = cpu_to_le32(can_ch_id); /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* handle restart but in asynchronously way * (uses PCAN-USB Pro code to complete asynchronous request) */ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info; u8 *pc = buf; /* build the entire cmds list in the provided buffer, to go back into * operational mode. 
*/ pc += pcan_usb_fd_build_restart_cmd(dev, pc); /* add EOC */ memset(pc, 0xff, sizeof(struct pucan_command)); pc += sizeof(struct pucan_command); /* complete the URB */ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep), buf, pc - buf, pcan_usb_pro_restart_complete, dev); /* and submit it. */ return usb_submit_urb(urb, GFP_ATOMIC); } static int pcan_usb_fd_drv_loaded(struct peak_usb_device *dev, bool loaded) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); pdev->cmd_buffer_addr[0] = 0; pdev->cmd_buffer_addr[1] = !!loaded; return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, PCAN_USBPRO_FCT_DRVLD, pdev->cmd_buffer_addr, PCAN_USBPRO_FCT_DRVLD_REQ_LEN); } static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; struct peak_usb_device *dev; struct net_device *netdev; struct canfd_frame *cfd; struct sk_buff *skb; const u16 rx_msg_flags = le16_to_cpu(rm->flags); if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) return -ENOMEM; dev = usb_if->dev[pucan_msg_get_channel(rm)]; netdev = dev->netdev; if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { /* CANFD frame case */ skb = alloc_canfd_skb(netdev, &cfd); if (!skb) return -ENOMEM; if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH) cfd->flags |= CANFD_BRS; if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND) cfd->flags |= CANFD_ESI; cfd->len = can_fd_dlc2len(pucan_msg_get_dlc(rm)); } else { /* CAN 2.0 frame case */ skb = alloc_can_skb(netdev, (struct can_frame **)&cfd); if (!skb) return -ENOMEM; can_frame_set_cc_len((struct can_frame *)cfd, pucan_msg_get_dlc(rm), dev->can.ctrlmode); } cfd->can_id = le32_to_cpu(rm->can_id); if (rx_msg_flags & PUCAN_MSG_EXT_ID) cfd->can_id |= CAN_EFF_FLAG; if (rx_msg_flags & PUCAN_MSG_RTR) { cfd->can_id |= CAN_RTR_FLAG; } else { memcpy(cfd->data, rm->d, cfd->len); netdev->stats.rx_bytes += cfd->len; } netdev->stats.rx_packets++; peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high)); return 0; } /* handle uCAN status message */ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; struct pcan_usb_fd_device *pdev; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; enum can_state rx_state, tx_state; struct peak_usb_device *dev; struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) return -ENOMEM; dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; pdev = container_of(dev, struct pcan_usb_fd_device, dev); netdev = dev->netdev; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; if (sm->channel_p_w_b & PUCAN_BUS_BUSOFF) { new_state = CAN_STATE_BUS_OFF; } else if (sm->channel_p_w_b & PUCAN_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) { new_state = CAN_STATE_ERROR_WARNING; } else { /* back to (or still in) ERROR_ACTIVE state */ new_state = CAN_STATE_ERROR_ACTIVE; pdev->bec.txerr = 0; pdev->bec.rxerr = 0; } /* state hasn't changed */ if (new_state == dev->can.state) return 0; /* handle bus state change */ tx_state = (pdev->bec.txerr >= pdev->bec.rxerr) ? new_state : 0; rx_state = (pdev->bec.txerr <= pdev->bec.rxerr) ? 
new_state : 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); can_change_state(netdev, cf, tx_state, rx_state); /* things must be done even in case of OOM */ if (new_state == CAN_STATE_BUS_OFF) can_bus_off(netdev); if (!skb) return -ENOMEM; peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high)); return 0; } /* handle uCAN error message */ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; struct pcan_usb_fd_device *pdev; struct peak_usb_device *dev; if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) return -EINVAL; dev = usb_if->dev[pucan_ermsg_get_channel(er)]; pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* keep a trace of tx and rx error counters for later use */ pdev->bec.txerr = er->tx_err_cnt; pdev->bec.rxerr = er->rx_err_cnt; return 0; } /* handle uCAN overrun message */ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg; struct peak_usb_device *dev; struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) return -EINVAL; dev = usb_if->dev[pufd_omsg_get_channel(ov)]; netdev = dev->netdev; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); if (!skb) return -ENOMEM; cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high)); netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; return 0; } /* handle USB calibration message */ static void pcan_usb_fd_decode_ts(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ts_msg *ts = (struct pcan_ufd_ts_msg *)rx_msg; /* should wait until clock is stabilized */ if (usb_if->cm_ignore_count > 0) usb_if->cm_ignore_count--; else peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts_low)); } /* callback for bulk IN urb */ static int pcan_usb_fd_decode_buf(struct peak_usb_device *dev, struct urb *urb) { struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev); struct net_device *netdev = dev->netdev; struct pucan_msg *rx_msg; u8 *msg_ptr, *msg_end; int err = 0; /* loop reading all the records from the incoming message */ msg_ptr = urb->transfer_buffer; msg_end = urb->transfer_buffer + urb->actual_length; for (; msg_ptr < msg_end;) { u16 rx_msg_type, rx_msg_size; rx_msg = (struct pucan_msg *)msg_ptr; if (!rx_msg->size) { /* null packet found: end of list */ break; } rx_msg_size = le16_to_cpu(rx_msg->size); rx_msg_type = le16_to_cpu(rx_msg->type); /* check if the record goes out of current packet */ if (msg_ptr + rx_msg_size > msg_end) { netdev_err(netdev, "got frag rec: should inc usb rx buf sze\n"); err = -EBADMSG; break; } switch (rx_msg_type) { case PUCAN_MSG_CAN_RX: err = pcan_usb_fd_decode_canmsg(usb_if, rx_msg); if (err < 0) goto fail; break; case PCAN_UFD_MSG_CALIBRATION: pcan_usb_fd_decode_ts(usb_if, rx_msg); break; case PUCAN_MSG_ERROR: err = pcan_usb_fd_decode_error(usb_if, rx_msg); if (err < 0) goto fail; break; case PUCAN_MSG_STATUS: err = pcan_usb_fd_decode_status(usb_if, rx_msg); if (err < 0) goto fail; break; case PCAN_UFD_MSG_OVERRUN: err = pcan_usb_fd_decode_overrun(usb_if, rx_msg); if (err < 0) goto fail; break; default: netdev_err(netdev, "unhandled msg type 0x%02x (%d): ignored\n", rx_msg_type, rx_msg_type); break; } 
msg_ptr += rx_msg_size; } fail: if (err) pcan_dump_mem("received msg", urb->transfer_buffer, urb->actual_length); return err; } /* CAN/CANFD frames encoding callback */ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u16 tx_msg_size, tx_msg_flags; u8 dlc; if (cfd->len > CANFD_MAX_DLEN) return -EINVAL; tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); tx_msg->size = cpu_to_le16(tx_msg_size); tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); tx_msg_flags = 0; if (cfd->can_id & CAN_EFF_FLAG) { tx_msg_flags |= PUCAN_MSG_EXT_ID; tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK); } else { tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK); } if (can_is_canfd_skb(skb)) { /* CAN FD frame case */ dlc = can_fd_len2dlc(cfd->len); tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN; if (cfd->flags & CANFD_BRS) tx_msg_flags |= PUCAN_MSG_BITRATE_SWITCH; if (cfd->flags & CANFD_ESI) tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAN 2.0 frame case */ dlc = can_get_cc_dlc((struct can_frame *)cfd, dev->can.ctrlmode); if (cfd->can_id & CAN_RTR_FLAG) tx_msg_flags |= PUCAN_MSG_RTR; } /* Single-Shot frame */ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) tx_msg_flags |= PUCAN_MSG_SINGLE_SHOT; tx_msg->flags = cpu_to_le16(tx_msg_flags); tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc); memcpy(tx_msg->d, cfd->data, cfd->len); /* add null size message to tag the end (messages are 32-bit aligned) */ tx_msg = (struct pucan_tx_msg *)(obuf + tx_msg_size); tx_msg->size = 0; /* set the whole size of the USB packet to send */ *size = tx_msg_size + sizeof(u32); return 0; } /* start the interface (last chance before set bus on) */ static int pcan_usb_fd_start(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); int err; /* set filter mode: all acceptance */ err = pcan_usb_fd_set_filter_std(dev, -1, 0xffffffff); if (err) return err; /* opening first device: */ if (pdev->usb_if->dev_opened_count == 0) { /* reset time_ref */ peak_usb_init_time_ref(&pdev->usb_if->time_ref, &pcan_usb_pro_fd); /* enable USB calibration messages */ err = pcan_usb_fd_set_options(dev, 1, PUCAN_OPTION_ERROR, PCAN_UFD_FLTEXT_CALIBRATION); } pdev->usb_if->dev_opened_count++; /* reset cached error counters */ pdev->bec.txerr = 0; pdev->bec.rxerr = 0; return err; } /* socket callback used to copy berr counter values received through USB */ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct peak_usb_device *dev = netdev_priv(netdev); struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); *bec = pdev->bec; /* must return 0 */ return 0; } /* probe function for all PCAN-USB FD family usb interfaces */ static int pcan_usb_fd_probe(struct usb_interface *intf) { struct usb_host_interface *iface_desc = &intf->altsetting[0]; /* CAN interface is always interface #0 */ return iface_desc->desc.bInterfaceNumber; } /* stop interface (last chance before set bus off) */ static int pcan_usb_fd_stop(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* turn off special msgs for that interface if no other dev opened */ if (pdev->usb_if->dev_opened_count == 1) pcan_usb_fd_set_options(dev, 0, PUCAN_OPTION_ERROR, PCAN_UFD_FLTEXT_CALIBRATION);
pdev->usb_if->dev_opened_count--; return 0; } /* called when probing, to initialize a device object */ static int pcan_usb_fd_init(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); struct pcan_ufd_fw_info *fw_info; int i, err = -ENOMEM; /* do this for 1st channel only */ if (!dev->prev_siblings) { /* allocate netdevices common structure attached to first one */ pdev->usb_if = kzalloc(sizeof(*pdev->usb_if), GFP_KERNEL); if (!pdev->usb_if) goto err_out; /* allocate command buffer once for all for the interface */ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE, GFP_KERNEL); if (!pdev->cmd_buffer_addr) goto err_out_1; /* number of ts msgs to ignore before taking one into account */ pdev->usb_if->cm_ignore_count = 5; fw_info = &pdev->usb_if->fw_info; err = pcan_usb_fd_read_fwinfo(dev, fw_info); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", dev->adapter->name, err); goto err_out_2; } /* explicit use of dev_xxx() instead of netdev_xxx() here: * information displayed are related to the device itself, not * to the canx (channel) device. */ dev_info(dev->netdev->dev.parent, "PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n", dev->adapter->name, fw_info->hw_version, fw_info->fw_version[0], fw_info->fw_version[1], fw_info->fw_version[2], dev->adapter->ctrl_count); /* check for ability to switch between ISO/non-ISO modes */ if (fw_info->fw_version[0] >= 2) { /* firmware >= 2.x supports ISO/non-ISO switching */ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; } else { /* firmware < 2.x only supports fixed(!) non-ISO */ dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO; } /* if vendor rsp type is greater than or equal to 2, then it * contains EP numbers to use for cmds pipes. If not, then * default EP should be used. */ if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) { fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT; fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN; } /* tell the hardware the can driver is running */ err = pcan_usb_fd_drv_loaded(dev, 1); if (err) { dev_err(dev->netdev->dev.parent, "unable to tell %s driver is loaded (err %d)\n", dev->adapter->name, err); goto err_out_2; } } else { /* otherwise, simply copy previous sibling's values */ struct pcan_usb_fd_device *ppdev = container_of(dev->prev_siblings, struct pcan_usb_fd_device, dev); pdev->usb_if = ppdev->usb_if; pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; /* do a copy of the ctrlmode[_supported] too */ dev->can.ctrlmode = ppdev->dev.can.ctrlmode; dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; fw_info = &pdev->usb_if->fw_info; } pdev->usb_if->dev[dev->ctrl_idx] = dev; dev->can_channel_id = le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]); /* if vendor rsp type is greater than or equal to 2, then it contains EP * numbers to use for data pipes. If not, then statically defined EP are * used (see peak_usb_create_dev()). 
*/ if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) { dev->ep_msg_in = fw_info->data_in_ep; dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx]; } /* set clock domain */ for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++) if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i]) break; if (i >= ARRAY_SIZE(pcan_usb_fd_clk_freq)) { dev_warn(dev->netdev->dev.parent, "incompatible clock frequencies\n"); err = -EINVAL; goto err_out_2; } pcan_usb_fd_set_clock_domain(dev, i); /* set LED in default state (end of init phase) */ pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); return 0; err_out_2: kfree(pdev->cmd_buffer_addr); err_out_1: kfree(pdev->usb_if); err_out: return err; } /* called when driver module is being unloaded */ static void pcan_usb_fd_exit(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* when rmmod called before unplug and if down, should reset things * before leaving */ if (dev->can.state != CAN_STATE_STOPPED) { /* set bus off on the corresponding channel */ pcan_usb_fd_set_bus(dev, 0); } /* switch off corresponding CAN LEDs */ pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_OFF); /* if channel #0 (only) */ if (dev->ctrl_idx == 0) { /* turn off calibration message if any device were opened */ if (pdev->usb_if->dev_opened_count > 0) pcan_usb_fd_set_options(dev, 0, PUCAN_OPTION_ERROR, PCAN_UFD_FLTEXT_CALIBRATION); /* tell USB adapter that the driver is being unloaded */ pcan_usb_fd_drv_loaded(dev, 0); } } /* called when the USB adapter is unplugged */ static void pcan_usb_fd_free(struct peak_usb_device *dev) { /* last device: can free shared objects now */ if (!dev->prev_siblings && !dev->next_siblings) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* free commands buffer */ kfree(pdev->cmd_buffer_addr); /* free usb interface object */ kfree(pdev->usb_if); } } /* blink LED's */ static int pcan_usb_fd_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (state) { case ETHTOOL_ID_ACTIVE: err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_FAST); break; case ETHTOOL_ID_INACTIVE: err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); break; default: break; } return err; } static const struct ethtool_ops pcan_usb_fd_ethtool_ops = { .set_phys_id = pcan_usb_fd_set_phys_id, .get_ts_info = pcan_get_ts_info, .get_eeprom_len = peak_usb_get_eeprom_len, .get_eeprom = peak_usb_get_eeprom, .set_eeprom = peak_usb_set_eeprom, }; /* describes the PCAN-USB FD adapter */ static const struct can_bittiming_const pcan_usb_fd_const = { .name = "pcan_usb_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_fd_data_const = { .name = "pcan_usb_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_fd = { .name = "PCAN-USB FD", .device_id = PCAN_USBFD_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = 
PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_fd_const, .data_bittiming_const = &pcan_usb_fd_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = &pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_fd_probe, .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; /* describes the PCAN-CHIP USB */ static const struct can_bittiming_const pcan_usb_chip_const = { .name = "pcan_chip_usb", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_chip_data_const = { .name = "pcan_chip_usb", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_chip = { .name = "PCAN-Chip USB", .device_id = PCAN_USBCHIP_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_chip_const, .data_bittiming_const = &pcan_usb_chip_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = &pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; /* 
describes the PCAN-USB Pro FD adapter */ static const struct can_bittiming_const pcan_usb_pro_fd_const = { .name = "pcan_usb_pro_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_pro_fd_data_const = { .name = "pcan_usb_pro_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_pro_fd = { .name = "PCAN-USB Pro FD", .device_id = PCAN_USBPROFD_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_pro_fd_const, .data_bittiming_const = &pcan_usb_pro_fd_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = &pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; /* describes the PCAN-USB X6 adapter */ static const struct can_bittiming_const pcan_usb_x6_const = { .name = "pcan_usb_x6", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_x6_data_const = { .name = "pcan_usb_x6", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_x6 = { .name = "PCAN-USB X6", .device_id = PCAN_USBX6_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_x6_const, .data_bittiming_const = &pcan_usb_x6_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = 
&pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, };
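/*
 * The encode/decode callbacks above frame uCAN records the same way in both
 * directions: a little-endian 16-bit size and type, a payload padded so every
 * record stays 32-bit aligned, and a null-size word terminating the list.
 * The standalone user-space sketch below only illustrates that framing
 * convention; the rec_hdr layout and frame_one_record() helper are
 * hypothetical stand-ins, not the driver's pucan_tx_msg or its API.
 */
#include <stdint.h>
#include <string.h>

#define REC_ALIGN(x) (((x) + 3u) & ~3u)	/* pad record sizes to 32 bits */

struct rec_hdr {
	uint16_t size;	/* total record size, already 32-bit aligned */
	uint16_t type;	/* record type */
};

/* Frame one record plus the 32-bit null terminator and return the byte
 * count to send, mirroring "*size = tx_msg_size + sizeof(u32)" above.
 * Assumes a little-endian host, since no cpu_to_le16() is applied here. */
static size_t frame_one_record(uint8_t *obuf, uint16_t type,
			       const void *payload, size_t len)
{
	uint16_t rec_size = (uint16_t)REC_ALIGN(sizeof(struct rec_hdr) + len);
	struct rec_hdr *hdr = (struct rec_hdr *)obuf;

	hdr->size = rec_size;
	hdr->type = type;
	memcpy(obuf + sizeof(*hdr), payload, len);
	memset(obuf + rec_size, 0, sizeof(uint32_t));	/* null size = end of list */
	return rec_size + sizeof(uint32_t);
}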
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the s5k83a sensor * * Copyright (C) 2008 Erik Andrén * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
* Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kthread.h> #include "m5602_s5k83a.h" static int s5k83a_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops s5k83a_ctrl_ops = { .s_ctrl = s5k83a_s_ctrl, }; static struct v4l2_pix_format s5k83a_modes[] = { { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 640 * 480, .bytesperline = 640, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 } }; static const unsigned char preinit_s5k83a[][4] = { {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00}, {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00}, {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00}, }; /* This could probably be considerably shortened. 
I don't have the hardware to experiment with it, patches welcome */ static const unsigned char init_s5k83a[][4] = { /* The following sequence is useless after a clean boot but is necessary after resume from suspend */ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00}, {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00}, {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00}, {SENSOR, 0xaf, 0x01, 0x00}, {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00}, {SENSOR, 0x7b, 0xff, 0x00}, {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00}, {SENSOR, 0x01, 0x50, 0x00}, {SENSOR, 0x12, 0x20, 0x00}, {SENSOR, 0x17, 0x40, 0x00}, {SENSOR, 0x1c, 0x00, 0x00}, {SENSOR, 0x02, 0x70, 0x00}, {SENSOR, 0x03, 0x0b, 0x00}, {SENSOR, 0x04, 0xf0, 0x00}, {SENSOR, 0x05, 0x0b, 0x00}, {SENSOR, 0x06, 0x71, 0x00}, {SENSOR, 0x07, 0xe8, 0x00}, /* 488 */ {SENSOR, 0x08, 0x02, 0x00}, {SENSOR, 0x09, 0x88, 0x00}, /* 648 */ {SENSOR, 0x14, 0x00, 0x00}, {SENSOR, 0x15, 0x20, 0x00}, /* 32 */ {SENSOR, 0x19, 0x00, 0x00}, {SENSOR, 0x1a, 0x98, 0x00}, /* 152 */ {SENSOR, 0x0f, 0x02, 0x00}, {SENSOR, 0x10, 0xe5, 0x00}, /* 741 */ /* normal colors (this is value after boot, but after tries can be different) */ {SENSOR, 0x00, 0x06, 0x00}, }; static const unsigned char start_s5k83a[][4] = { {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00}, {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, /* 484 */ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, /* 639 */ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, }; static void s5k83a_dump_registers(struct sd *sd); static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data); static int s5k83a_set_led_indication(struct sd *sd, u8 val); static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev, __s32 vflip, __s32 hflip); int s5k83a_probe(struct sd *sd) { u8 prod_id = 0, ver_id = 
0; int i, err = 0; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { if (force_sensor == S5K83A_SENSOR) { pr_info("Forcing a %s sensor\n", s5k83a.name); goto sensor_found; } /* If we want to force another sensor, don't try to probe this * one */ return -ENODEV; } gspca_dbg(gspca_dev, D_PROBE, "Probing for a s5k83a sensor\n"); /* Preinit the sensor */ for (i = 0; i < ARRAY_SIZE(preinit_s5k83a) && !err; i++) { u8 data[2] = {preinit_s5k83a[i][2], preinit_s5k83a[i][3]}; if (preinit_s5k83a[i][0] == SENSOR) err = m5602_write_sensor(sd, preinit_s5k83a[i][1], data, 2); else err = m5602_write_bridge(sd, preinit_s5k83a[i][1], data[0]); } /* We don't know which register (if any) contains the product id. * Just pick the first addresses that seem to produce the same results * on multiple machines */ if (m5602_read_sensor(sd, 0x00, &prod_id, 1)) return -ENODEV; if (m5602_read_sensor(sd, 0x01, &ver_id, 1)) return -ENODEV; if ((prod_id == 0xff) || (ver_id == 0xff)) return -ENODEV; else pr_info("Detected a s5k83a sensor\n"); sensor_found: sd->gspca_dev.cam.cam_mode = s5k83a_modes; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k83a_modes); /* null the pointer! thread isn't running now */ sd->rotation_thread = NULL; return 0; } int s5k83a_init(struct sd *sd) { int i, err = 0; for (i = 0; i < ARRAY_SIZE(init_s5k83a) && !err; i++) { u8 data[2] = {0x00, 0x00}; switch (init_s5k83a[i][0]) { case BRIDGE: err = m5602_write_bridge(sd, init_s5k83a[i][1], init_s5k83a[i][2]); break; case SENSOR: data[0] = init_s5k83a[i][2]; err = m5602_write_sensor(sd, init_s5k83a[i][1], data, 1); break; case SENSOR_LONG: data[0] = init_s5k83a[i][2]; data[1] = init_s5k83a[i][3]; err = m5602_write_sensor(sd, init_s5k83a[i][1], data, 2); break; default: pr_info("Invalid stream command, exiting init\n"); return -EINVAL; } } if (dump_sensor) s5k83a_dump_registers(sd); return err; } int s5k83a_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; sd->gspca_dev.vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 6); v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, S5K83A_DEFAULT_BRIGHTNESS); v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_EXPOSURE, 0, S5K83A_MAXIMUM_EXPOSURE, 1, S5K83A_DEFAULT_EXPOSURE); v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, S5K83A_DEFAULT_GAIN); sd->hflip = v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &s5k83a_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->hflip); return 0; } static int rotation_thread_function(void *data) { struct sd *sd = (struct sd *) data; u8 reg, previous_rotation = 0; __s32 vflip, hflip; set_current_state(TASK_INTERRUPTIBLE); while (!schedule_timeout(msecs_to_jiffies(100))) { if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock)) break; s5k83a_get_rotation(sd, &reg); if (previous_rotation != reg) { previous_rotation = reg; pr_info("Camera was flipped\n"); hflip = sd->hflip->val; vflip = sd->vflip->val; if (reg) { vflip = !vflip; hflip = !hflip; } s5k83a_set_flip_real((struct gspca_dev *) sd, vflip, hflip); } mutex_unlock(&sd->gspca_dev.usb_lock); set_current_state(TASK_INTERRUPTIBLE); } /* return to "front" flip */ if (previous_rotation) { hflip = sd->hflip->val; vflip = sd->vflip->val; s5k83a_set_flip_real((struct gspca_dev *) sd, vflip, hflip); } sd->rotation_thread = NULL; return 0; } int s5k83a_start(struct sd *sd) { int i, err = 0;
/* Create another thread, polling the GPIO ports of the camera to check if it got rotated. This is how the windows driver does it so we have to assume that there is no better way of accomplishing this */ sd->rotation_thread = kthread_run(rotation_thread_function, sd, "rotation thread"); if (IS_ERR(sd->rotation_thread)) { err = PTR_ERR(sd->rotation_thread); sd->rotation_thread = NULL; return err; } /* Preinit the sensor */ for (i = 0; i < ARRAY_SIZE(start_s5k83a) && !err; i++) { u8 data[2] = {start_s5k83a[i][2], start_s5k83a[i][3]}; if (start_s5k83a[i][0] == SENSOR) err = m5602_write_sensor(sd, start_s5k83a[i][1], data, 2); else err = m5602_write_bridge(sd, start_s5k83a[i][1], data[0]); } if (err < 0) return err; return s5k83a_set_led_indication(sd, 1); } int s5k83a_stop(struct sd *sd) { if (sd->rotation_thread) kthread_stop(sd->rotation_thread); return s5k83a_set_led_indication(sd, 0); } void s5k83a_disconnect(struct sd *sd) { s5k83a_stop(sd); sd->sensor = NULL; } static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 data[2]; struct sd *sd = (struct sd *) gspca_dev; data[0] = 0x00; data[1] = 0x20; err = m5602_write_sensor(sd, 0x14, data, 2); if (err < 0) return err; data[0] = 0x01; data[1] = 0x00; err = m5602_write_sensor(sd, 0x0d, data, 2); if (err < 0) return err; /* FIXME: This is not sane, we need to figure out the composition of these registers */ data[0] = val >> 3; /* gain, high 5 bits */ data[1] = val >> 1; /* gain, high 7 bits */ err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2); return err; } static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val) { u8 data[1]; struct sd *sd = (struct sd *) gspca_dev; data[0] = val; return m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 1); } static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { u8 data[2]; struct sd *sd = (struct sd *) gspca_dev; data[0] = 0; data[1] = val; return m5602_write_sensor(sd, S5K83A_EXPOSURE, data, 2); } static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev, __s32 vflip, __s32 hflip) { int err; u8 data[1]; struct sd *sd = (struct sd *) gspca_dev; data[0] = 0x05; err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); if (err < 0) return err; /* six bit is vflip, seven is hflip */ data[0] = S5K83A_FLIP_MASK; data[0] = (vflip) ? data[0] | 0x40 : data[0]; data[0] = (hflip) ? data[0] | 0x80 : data[0]; err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1); if (err < 0) return err; data[0] = (vflip) ? 0x0b : 0x0a; err = m5602_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1); if (err < 0) return err; data[0] = (hflip) ? 
0x0a : 0x0b; err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1); return err; } static int s5k83a_set_hvflip(struct gspca_dev *gspca_dev) { int err; u8 reg; struct sd *sd = (struct sd *) gspca_dev; int hflip = sd->hflip->val; int vflip = sd->vflip->val; err = s5k83a_get_rotation(sd, &reg); if (err < 0) return err; if (reg) { hflip = !hflip; vflip = !vflip; } err = s5k83a_set_flip_real(gspca_dev, vflip, hflip); return err; } static int s5k83a_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); int err; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: err = s5k83a_set_brightness(gspca_dev, ctrl->val); break; case V4L2_CID_EXPOSURE: err = s5k83a_set_exposure(gspca_dev, ctrl->val); break; case V4L2_CID_GAIN: err = s5k83a_set_gain(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: err = s5k83a_set_hvflip(gspca_dev); break; default: return -EINVAL; } return err; } static int s5k83a_set_led_indication(struct sd *sd, u8 val) { int err = 0; u8 data[1]; err = m5602_read_bridge(sd, M5602_XB_GPIO_DAT, data); if (err < 0) return err; if (val) data[0] = data[0] | S5K83A_GPIO_LED_MASK; else data[0] = data[0] & ~S5K83A_GPIO_LED_MASK; err = m5602_write_bridge(sd, M5602_XB_GPIO_DAT, data[0]); return err; } /* Get camera rotation on Acer notebooks */ static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data) { int err = m5602_read_bridge(sd, M5602_XB_GPIO_DAT, reg_data); *reg_data = (*reg_data & S5K83A_GPIO_ROTATION_MASK) ? 0 : 1; return err; } static void s5k83a_dump_registers(struct sd *sd) { int address; u8 page, old_page; m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1); for (page = 0; page < 16; page++) { m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); pr_info("Dumping the s5k83a register state for page 0x%x\n", page); for (address = 0; address <= 0xff; address++) { u8 val = 0; m5602_read_sensor(sd, address, &val, 1); pr_info("register 0x%x contains 0x%x\n", address, val); } } pr_info("s5k83a register state dump complete\n"); for (page = 0; page < 16; page++) { m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); pr_info("Probing for which registers that are read/write for page 0x%x\n", page); for (address = 0; address <= 0xff; address++) { u8 old_val, ctrl_val, test_val = 0xff; m5602_read_sensor(sd, address, &old_val, 1); m5602_write_sensor(sd, address, &test_val, 1); m5602_read_sensor(sd, address, &ctrl_val, 1); if (ctrl_val == test_val) pr_info("register 0x%x is writeable\n", address); else pr_info("register 0x%x is read only\n", address); /* Restore original val */ m5602_write_sensor(sd, address, &old_val, 1); } } pr_info("Read/write register probing complete\n"); m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1); }
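/*
 * s5k83a_set_gain() above splits the 8-bit V4L2 gain value across two
 * register bytes as (val >> 3, val >> 1); the driver's own FIXME notes that
 * the real composition of S5K83A_GAIN is unknown. The small user-space
 * example below only reproduces that arithmetic for one sample value and is
 * not part of the driver.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t val = 0xa5;		/* example control value (165) */
	uint8_t hi = val >> 3;		/* 0x14: top 5 bits of the gain */
	uint8_t lo = val >> 1;		/* 0x52: top 7 bits of the gain */

	printf("gain 0x%02x -> regs {0x%02x, 0x%02x}\n", val, hi, lo);
	return 0;
}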
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * inet6 interface/address list definitions * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> */ #ifndef _NET_IF_INET6_H #define _NET_IF_INET6_H #include <net/snmp.h> #include <linux/ipv6.h> #include <linux/refcount.h> /* inet6_dev.if_flags */ #define IF_RA_OTHERCONF 0x80 #define IF_RA_MANAGED 0x40 #define IF_RA_RCVD 0x20 #define IF_RS_SENT 0x10 #define IF_READY 0x80000000 enum { INET6_IFADDR_STATE_PREDAD, INET6_IFADDR_STATE_DAD, INET6_IFADDR_STATE_POSTDAD, INET6_IFADDR_STATE_ERRDAD, INET6_IFADDR_STATE_DEAD, }; struct inet6_ifaddr { struct in6_addr addr; __u32 prefix_len; __u32 rt_priority; /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */ __u32 valid_lft; __u32 prefered_lft; refcount_t refcnt; spinlock_t lock; int state; __u32 flags; __u8 dad_probes; __u8 stable_privacy_retry; __u16 scope; __u64 dad_nonce; unsigned long cstamp; /* created timestamp */ unsigned long tstamp; /* updated timestamp */ struct delayed_work dad_work; struct inet6_dev *idev; struct fib6_info *rt; struct hlist_node addr_lst; struct list_head if_list; /* * Used to safely traverse idev->addr_list in process context * if the idev->lock needed to protect idev->addr_list cannot be held. * In that case, add the items to this list temporarily and iterate * without holding idev->lock. * See addrconf_ifdown and dev_forward_change. */ struct list_head if_list_aux; struct list_head tmp_list; struct inet6_ifaddr *ifpub; int regen_count; bool tokenized; u8 ifa_proto; struct rcu_head rcu; struct in6_addr peer_addr; }; struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; struct in6_addr sl_addr[] __counted_by(sl_max); }; #define IP6_SFBLOCK 10 /* allocate this many at once */ struct ipv6_mc_socklist { struct in6_addr addr; int ifindex; unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ipv6_mc_socklist __rcu *next; struct ip6_sf_socklist __rcu *sflist; struct rcu_head rcu; }; struct ip6_sf_list { struct ip6_sf_list __rcu *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2]; /* include/exclude counts */ unsigned char sf_gsresp; /* include in g & s response? */ unsigned char sf_oldin; /* change state */ unsigned char sf_crcount; /* retrans.
left to send */ struct rcu_head rcu; }; #define MAF_TIMER_RUNNING 0x01 #define MAF_LAST_REPORTER 0x02 #define MAF_LOADED 0x04 #define MAF_NOREPORT 0x08 #define MAF_GSQUERY 0x10 struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 __rcu *next; struct ip6_sf_list __rcu *mca_sources; struct ip6_sf_list __rcu *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long mca_sfcount[2]; struct delayed_work mca_work; unsigned int mca_flags; int mca_users; refcount_t mca_refcnt; unsigned long mca_cstamp; unsigned long mca_tstamp; struct rcu_head rcu; }; /* Anycast stuff */ struct ipv6_ac_socklist { struct in6_addr acl_addr; int acl_ifindex; struct ipv6_ac_socklist *acl_next; }; struct ifacaddr6 { struct in6_addr aca_addr; struct fib6_info *aca_rt; struct ifacaddr6 __rcu *aca_next; struct hlist_node aca_addr_lst; int aca_users; refcount_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; struct rcu_head rcu; }; #define IFA_HOST IPV6_ADDR_LOOPBACK #define IFA_LINK IPV6_ADDR_LINKLOCAL #define IFA_SITE IPV6_ADDR_SITELOCAL struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; DEFINE_SNMP_STAT(struct ipstats_mib, ipv6); DEFINE_SNMP_STAT_ATOMIC(struct icmpv6_mib_device, icmpv6dev); DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib_device, icmpv6msgdev); }; struct inet6_dev { struct net_device *dev; netdevice_tracker dev_tracker; struct list_head addr_list; struct ifmcaddr6 __rcu *mc_list; struct ifmcaddr6 __rcu *mc_tomb; unsigned char mc_qrv; /* Query Robustness Variable */ unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */ unsigned long mc_qi; /* Query Interval */ unsigned long mc_qri; /* Query Response Interval */ unsigned long mc_maxdelay; struct delayed_work mc_gq_work; /* general query work */ struct delayed_work mc_ifc_work; /* interface change work */ struct delayed_work mc_dad_work; /* dad complete mc work */ struct delayed_work mc_query_work; /* mld query work */ struct delayed_work mc_report_work; /* mld report work */ struct sk_buff_head mc_query_queue; /* mld query queue */ struct sk_buff_head mc_report_queue; /* mld report queue */ spinlock_t mc_query_lock; /* mld query queue lock */ spinlock_t mc_report_lock; /* mld query report lock */ struct mutex mc_lock; /* mld global lock */ struct ifacaddr6 __rcu *ac_list; rwlock_t lock; refcount_t refcnt; __u32 if_flags; int dead; u32 desync_factor; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __s32 rs_interval; /* in jiffies */ __u8 rs_probes; unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; unsigned int ra_mtu; }; static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) { /* * +-------+-------+-------+-------+-------+-------+ * | 33 | 33 | DST13 | DST14 | DST15 | DST16 | * +-------+-------+-------+-------+-------+-------+ */ buf[0]= 0x33; buf[1]= 0x33; memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); } static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf) { buf[0] = 0x00; } static inline void ipv6_ib_mc_map(const struct in6_addr *addr, const unsigned char *broadcast, char *buf) { unsigned char scope = broadcast[5] & 0xF; buf[0] = 0; /* Reserved */ buf[1] = 0xff; /* Multicast QPN */ buf[2] = 0xff; buf[3] = 0xff; buf[4] = 0xff; buf[5] = 0x10 | scope; /* scope from broadcast address */ buf[6] = 
0x60; /* IPv6 signature */ buf[7] = 0x1b; buf[8] = broadcast[8]; /* P_Key */ buf[9] = broadcast[9]; memcpy(buf + 10, addr->s6_addr + 6, 10); } static inline int ipv6_ipgre_mc_map(const struct in6_addr *addr, const unsigned char *broadcast, char *buf) { if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) { memcpy(buf, broadcast, 4); } else { /* v4mapped? */ if ((addr->s6_addr32[0] | addr->s6_addr32[1] | (addr->s6_addr32[2] ^ htonl(0x0000ffff))) != 0) return -EINVAL; memcpy(buf, &addr->s6_addr32[3], 4); } return 0; } #endif
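/*
 * ipv6_eth_mc_map() above builds an Ethernet multicast MAC from 33:33
 * followed by the low 32 bits of the IPv6 group address, so ff02::1
 * (all-nodes) maps to 33:33:00:00:00:01. The user-space sketch below
 * restates that mapping for one address; it is illustrative only and does
 * not call the kernel helper itself.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr grp;
	unsigned char mac[6] = { 0x33, 0x33 };

	inet_pton(AF_INET6, "ff02::1", &grp);
	memcpy(mac + 2, &grp.s6_addr[12], 4);	/* low 4 bytes of the group address */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}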
// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>
#include <linux/oom.h>		/* check_stable_address_space */
#include <linux/pagewalk.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Covers return_instance's uprobe lifetime. */
DEFINE_STATIC_SRCU(uretprobes_srcu);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct list_head	consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	union {
		struct rcu_head		rcu;
		struct work_struct	work;
	};
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;		/* "unsigned long" so bitops work */

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot.  It frees the
 * slot after singlestep.  Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t	wq;		/* if all slots are busy */
	unsigned long		*bitmap;	/* 0 = free slot */

	struct page		*page;
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long		vaddr;		/* Page(s) of instruction slots */
};

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
}

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
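 *
 * Note: the test below requires VM_MAYEXEC to be set while VM_HUGETLB and
 * VM_MAYSHARE (plus VM_WRITE when registering) must be clear, which is what
 * the "(vm_flags & flags) == VM_MAYEXEC" comparison encodes.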
*/ static bool valid_vma(struct vm_area_struct *vma, bool is_register) { vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE; if (is_register) flags |= VM_WRITE; return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; } static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) { return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); } static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) { return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); } /** * is_swbp_insn - check if instruction is breakpoint instruction. * @insn: instruction to be checked. * Default implementation of is_swbp_insn * Returns true if @insn is a breakpoint instruction. */ bool __weak is_swbp_insn(uprobe_opcode_t *insn) { return *insn == UPROBE_SWBP_INSN; } /** * is_trap_insn - check if instruction is breakpoint instruction. * @insn: instruction to be checked. * Default implementation of is_trap_insn * Returns true if @insn is a breakpoint instruction. * * This function is needed for the case where an architecture has multiple * trap instructions (like powerpc). */ bool __weak is_trap_insn(uprobe_opcode_t *insn) { return is_swbp_insn(insn); } void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) { void *kaddr = kmap_atomic(page); memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); kunmap_atomic(kaddr); } static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) { void *kaddr = kmap_atomic(page); memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); kunmap_atomic(kaddr); } static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *insn, int nbytes, void *data) { uprobe_opcode_t old_opcode; bool is_swbp; /* * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here. * We do not check if it is any other 'trap variant' which could * be conditional trap instruction such as the one powerpc supports. * * The logic is that we do not care if the underlying instruction * is a trap variant; uprobes always wins over any other (gdb) * breakpoint. */ uprobe_copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); is_swbp = is_swbp_insn(&old_opcode); if (is_swbp_insn(insn)) { if (is_swbp) /* register: already installed? */ return 0; } else { if (!is_swbp) /* unregister: was it changed by us? 
*/ return 0; } return 1; } static struct delayed_uprobe * delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm) { struct delayed_uprobe *du; list_for_each_entry(du, &delayed_uprobe_list, list) if (du->uprobe == uprobe && du->mm == mm) return du; return NULL; } static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) { struct delayed_uprobe *du; if (delayed_uprobe_check(uprobe, mm)) return 0; du = kzalloc(sizeof(*du), GFP_KERNEL); if (!du) return -ENOMEM; du->uprobe = uprobe; du->mm = mm; list_add(&du->list, &delayed_uprobe_list); return 0; } static void delayed_uprobe_delete(struct delayed_uprobe *du) { if (WARN_ON(!du)) return; list_del(&du->list); kfree(du); } static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm) { struct list_head *pos, *q; struct delayed_uprobe *du; if (!uprobe && !mm) return; list_for_each_safe(pos, q, &delayed_uprobe_list) { du = list_entry(pos, struct delayed_uprobe, list); if (uprobe && du->uprobe != uprobe) continue; if (mm && du->mm != mm) continue; delayed_uprobe_delete(du); } } static bool valid_ref_ctr_vma(struct uprobe *uprobe, struct vm_area_struct *vma) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); return uprobe->ref_ctr_offset && vma->vm_file && file_inode(vma->vm_file) == uprobe->inode && (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && vma->vm_start <= vaddr && vma->vm_end > vaddr; } static struct vm_area_struct * find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) { VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *tmp; for_each_vma(vmi, tmp) if (valid_ref_ctr_vma(uprobe, tmp)) return tmp; return NULL; } static int __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) { void *kaddr; struct page *page; int ret; short *ptr; if (!vaddr || !d) return -EINVAL; ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE, &page, NULL); if (unlikely(ret <= 0)) { /* * We are asking for 1 page. If get_user_pages_remote() fails, * it may return 0, in that case we have to return error. */ return ret == 0 ? -EBUSY : ret; } kaddr = kmap_atomic(page); ptr = kaddr + (vaddr & ~PAGE_MASK); if (unlikely(*ptr + d < 0)) { pr_warn("ref_ctr going negative. vaddr: 0x%lx, " "curr val: %d, delta: %d\n", vaddr, *ptr, d); ret = -EINVAL; goto out; } *ptr += d; ret = 0; out: kunmap_atomic(kaddr); put_page(page); return ret; } static void update_ref_ctr_warn(struct uprobe *uprobe, struct mm_struct *mm, short d) { pr_warn("ref_ctr %s failed for inode: 0x%lx offset: " "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%p\n", d > 0 ? 
"increment" : "decrement", uprobe->inode->i_ino, (unsigned long long) uprobe->offset, (unsigned long long) uprobe->ref_ctr_offset, mm); } static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, short d) { struct vm_area_struct *rc_vma; unsigned long rc_vaddr; int ret = 0; rc_vma = find_ref_ctr_vma(uprobe, mm); if (rc_vma) { rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); ret = __update_ref_ctr(mm, rc_vaddr, d); if (ret) update_ref_ctr_warn(uprobe, mm, d); if (d > 0) return ret; } mutex_lock(&delayed_uprobe_lock); if (d > 0) ret = delayed_uprobe_add(uprobe, mm); else delayed_uprobe_remove(uprobe, mm); mutex_unlock(&delayed_uprobe_lock); return ret; } static bool orig_page_is_identical(struct vm_area_struct *vma, unsigned long vaddr, struct page *page, bool *pmd_mappable) { const pgoff_t index = vaddr_to_offset(vma, vaddr) >> PAGE_SHIFT; struct folio *orig_folio = filemap_get_folio(vma->vm_file->f_mapping, index); struct page *orig_page; bool identical; if (IS_ERR(orig_folio)) return false; orig_page = folio_file_page(orig_folio, index); *pmd_mappable = folio_test_pmd_mappable(orig_folio); identical = folio_test_uptodate(orig_folio) && pages_identical(page, orig_page); folio_put(orig_folio); return identical; } static int __uprobe_write(struct vm_area_struct *vma, struct folio_walk *fw, struct folio *folio, unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes, bool is_register) { const unsigned long vaddr = insn_vaddr & PAGE_MASK; bool pmd_mappable; /* For now, we'll only handle PTE-mapped folios. */ if (fw->level != FW_LEVEL_PTE) return -EFAULT; /* * See can_follow_write_pte(): we'd actually prefer a writable PTE here, * but the VMA might not be writable. */ if (!pte_write(fw->pte)) { if (!PageAnonExclusive(fw->page)) return -EFAULT; if (unlikely(userfaultfd_pte_wp(vma, fw->pte))) return -EFAULT; /* SOFTDIRTY is handled via pte_mkdirty() below. */ } /* * We'll temporarily unmap the page and flush the TLB, such that we can * modify the page atomically. */ flush_cache_page(vma, vaddr, pte_pfn(fw->pte)); fw->pte = ptep_clear_flush(vma, vaddr, fw->ptep); copy_to_page(fw->page, insn_vaddr, insn, nbytes); /* * When unregistering, we may only zap a PTE if uffd is disabled and * there are no unexpected folio references ... */ if (is_register || userfaultfd_missing(vma) || (folio_ref_count(folio) != folio_expected_ref_count(folio) + 1)) goto remap; /* * ... and the mapped page is identical to the original page that * would get faulted in on next access. */ if (!orig_page_is_identical(vma, vaddr, fw->page, &pmd_mappable)) goto remap; dec_mm_counter(vma->vm_mm, MM_ANONPAGES); folio_remove_rmap_pte(folio, fw->page, vma); if (!folio_mapped(folio) && folio_test_swapcache(folio) && folio_trylock(folio)) { folio_free_swap(folio); folio_unlock(folio); } folio_put(folio); return pmd_mappable; remap: /* * Make sure that our copy_to_page() changes become visible before the * set_pte_at() write. */ smp_wmb(); /* We modified the page. Make sure to mark the PTE dirty. */ set_pte_at(vma->vm_mm, vaddr, fw->ptep, pte_mkdirty(fw->pte)); return 0; } /* * NOTE: * Expect the breakpoint instruction to be the smallest size instruction for * the architecture. If an arch has variable length instruction and the * breakpoint instruction is not of the smallest length instruction * supported by that architecture then we need to modify is_trap_at_addr and * uprobe_write_opcode accordingly. This would never be a problem for archs * that have fixed length instructions. 
* * uprobe_write_opcode - write the opcode at a given virtual address. * @auprobe: arch specific probepoint information. * @vma: the probed virtual memory area. * @opcode_vaddr: the virtual address to store the opcode. * @opcode: opcode to be written at @opcode_vaddr. * * Called with mm->mmap_lock held for write. * Return 0 (success) or a negative errno. */ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, const unsigned long opcode_vaddr, uprobe_opcode_t opcode, bool is_register) { return uprobe_write(auprobe, vma, opcode_vaddr, &opcode, UPROBE_SWBP_INSN_SIZE, verify_opcode, is_register, true /* do_update_ref_ctr */, NULL); } int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma, const unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes, uprobe_write_verify_t verify, bool is_register, bool do_update_ref_ctr, void *data) { const unsigned long vaddr = insn_vaddr & PAGE_MASK; struct mm_struct *mm = vma->vm_mm; struct uprobe *uprobe; int ret, ref_ctr_updated = 0; unsigned int gup_flags = FOLL_FORCE; struct mmu_notifier_range range; struct folio_walk fw; struct folio *folio; struct page *page; uprobe = container_of(auprobe, struct uprobe, arch); if (WARN_ON_ONCE(!is_cow_mapping(vma->vm_flags))) return -EINVAL; /* * When registering, we have to break COW to get an exclusive anonymous * page that we can safely modify. Use FOLL_WRITE to trigger a write * fault if required. When unregistering, we might be lucky and the * anon page is already gone. So defer write faults until really * required. Use FOLL_SPLIT_PMD, because __uprobe_write() * cannot deal with PMDs yet. */ if (is_register) gup_flags |= FOLL_WRITE | FOLL_SPLIT_PMD; retry: ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, &page, NULL); if (ret <= 0) goto out; folio = page_folio(page); ret = verify(page, insn_vaddr, insn, nbytes, data); if (ret <= 0) { folio_put(folio); goto out; } /* We are going to replace instruction, update ref_ctr. */ if (do_update_ref_ctr && !ref_ctr_updated && uprobe->ref_ctr_offset) { ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1); if (ret) { folio_put(folio); goto out; } ref_ctr_updated = 1; } ret = 0; if (unlikely(!folio_test_anon(folio) || folio_is_zone_device(folio))) { VM_WARN_ON_ONCE(is_register); folio_put(folio); goto out; } if (!is_register) { /* * In the common case, we'll be able to zap the page when * unregistering. So trigger MMU notifiers now, as we won't * be able to do it under PTL. */ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vaddr, vaddr + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); } ret = -EAGAIN; /* Walk the page tables again, to perform the actual update. */ if (folio_walk_start(&fw, vma, vaddr, 0)) { if (fw.page == page) ret = __uprobe_write(vma, &fw, folio, insn_vaddr, insn, nbytes, is_register); folio_walk_end(&fw, vma); } if (!is_register) mmu_notifier_invalidate_range_end(&range); folio_put(folio); switch (ret) { case -EFAULT: gup_flags |= FOLL_WRITE | FOLL_SPLIT_PMD; fallthrough; case -EAGAIN: goto retry; default: break; } out: /* Revert back reference counter if instruction update failed. */ if (do_update_ref_ctr && ret < 0 && ref_ctr_updated) update_ref_ctr(uprobe, mm, is_register ? -1 : 1); /* try collapse pmd for compound page */ if (ret > 0) collapse_pte_mapped_thp(mm, vaddr, false); return ret < 0 ? ret : 0; } /** * set_swbp - store breakpoint at a given address. * @auprobe: arch specific probepoint information. * @vma: the probed virtual memory area. 
* @vaddr: the virtual address to insert the opcode. * * For mm @mm, store the breakpoint instruction at @vaddr. * Return 0 (success) or a negative errno. */ int __weak set_swbp(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr) { return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN, true); } /** * set_orig_insn - Restore the original instruction. * @vma: the probed virtual memory area. * @auprobe: arch specific probepoint information. * @vaddr: the virtual address to insert the opcode. * * For mm @mm, restore the original opcode (opcode) at @vaddr. * Return 0 (success) or a negative errno. */ int __weak set_orig_insn(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr) { return uprobe_write_opcode(auprobe, vma, vaddr, *(uprobe_opcode_t *)&auprobe->insn, false); } /* uprobe should have guaranteed positive refcount */ static struct uprobe *get_uprobe(struct uprobe *uprobe) { refcount_inc(&uprobe->ref); return uprobe; } /* * uprobe should have guaranteed lifetime, which can be either of: * - caller already has refcount taken (and wants an extra one); * - uprobe is RCU protected and won't be freed until after grace period; * - we are holding uprobes_treelock (for read or write, doesn't matter). */ static struct uprobe *try_get_uprobe(struct uprobe *uprobe) { if (refcount_inc_not_zero(&uprobe->ref)) return uprobe; return NULL; } static inline bool uprobe_is_active(struct uprobe *uprobe) { return !RB_EMPTY_NODE(&uprobe->rb_node); } static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu) { struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); kfree(uprobe); } static void uprobe_free_srcu(struct rcu_head *rcu) { struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace); } static void uprobe_free_deferred(struct work_struct *work) { struct uprobe *uprobe = container_of(work, struct uprobe, work); write_lock(&uprobes_treelock); if (uprobe_is_active(uprobe)) { write_seqcount_begin(&uprobes_seqcount); rb_erase(&uprobe->rb_node, &uprobes_tree); write_seqcount_end(&uprobes_seqcount); } write_unlock(&uprobes_treelock); /* * If application munmap(exec_vma) before uprobe_unregister() * gets called, we don't get a chance to remove uprobe from * delayed_uprobe_list from remove_breakpoint(). Do it here. */ mutex_lock(&delayed_uprobe_lock); delayed_uprobe_remove(uprobe, NULL); mutex_unlock(&delayed_uprobe_lock); /* start srcu -> rcu_tasks_trace -> kfree chain */ call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu); } static void put_uprobe(struct uprobe *uprobe) { if (!refcount_dec_and_test(&uprobe->ref)) return; INIT_WORK(&uprobe->work, uprobe_free_deferred); schedule_work(&uprobe->work); } /* Initialize hprobe as SRCU-protected "leased" uprobe */ static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx) { WARN_ON(!uprobe); hprobe->state = HPROBE_LEASED; hprobe->uprobe = uprobe; hprobe->srcu_idx = srcu_idx; } /* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */ static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe) { hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE; hprobe->uprobe = uprobe; hprobe->srcu_idx = -1; } /* * hprobe_consume() fetches hprobe's underlying uprobe and detects whether * uprobe is SRCU protected or is refcounted. hprobe_consume() can be * used only once for a given hprobe. 
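 *
 * The intended pairing looks roughly like this (illustrative sketch only,
 * mirroring free_ret_instance() below; "ri" stands for a return_instance):
 *
 *	enum hprobe_state hstate;
 *	struct uprobe *uprobe;
 *
 *	uprobe = hprobe_consume(&ri->hprobe, &hstate);
 *	if (uprobe)
 *		... use uprobe ...
 *	hprobe_finalize(&ri->hprobe, hstate);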
* * Caller has to call hprobe_finalize() and pass previous hprobe_state, so * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever * is appropriate. */ static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate) { *hstate = xchg(&hprobe->state, HPROBE_CONSUMED); switch (*hstate) { case HPROBE_LEASED: case HPROBE_STABLE: return hprobe->uprobe; case HPROBE_GONE: /* uprobe is NULL, no SRCU */ case HPROBE_CONSUMED: /* uprobe was finalized already, do nothing */ return NULL; default: WARN(1, "hprobe invalid state %d", *hstate); return NULL; } } /* * Reset hprobe state and, if hprobe was LEASED, release SRCU lock. * hprobe_finalize() can only be used from current context after * hprobe_consume() call (which determines uprobe and hstate value). */ static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate) { switch (hstate) { case HPROBE_LEASED: __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); break; case HPROBE_STABLE: put_uprobe(hprobe->uprobe); break; case HPROBE_GONE: case HPROBE_CONSUMED: break; default: WARN(1, "hprobe invalid state %d", hstate); break; } } /* * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED) * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of * them can win the race to perform SRCU unlocking. Whoever wins must perform * SRCU unlock. * * Returns underlying valid uprobe or NULL, if there was no underlying uprobe * to begin with or we failed to bump its refcount and it's going away. * * Returned non-NULL uprobe can be still safely used within an ongoing SRCU * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has * an extra refcount for caller to assume and use. Otherwise, it's not * guaranteed that returned uprobe has a positive refcount, so caller has to * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current * SRCU lock region. See dup_utask(). */ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get) { enum hprobe_state hstate; /* * Caller should guarantee that return_instance is not going to be * freed from under us. This can be achieved either through holding * rcu_read_lock() or by owning return_instance in the first place. * * Underlying uprobe is itself protected from reuse by SRCU, so ensure * SRCU lock is held properly. */ lockdep_assert(srcu_read_lock_held(&uretprobes_srcu)); hstate = READ_ONCE(hprobe->state); switch (hstate) { case HPROBE_STABLE: /* uprobe has positive refcount, bump refcount, if necessary */ return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe; case HPROBE_GONE: /* * SRCU was unlocked earlier and we didn't manage to take * uprobe refcnt, so it's effectively NULL */ return NULL; case HPROBE_CONSUMED: /* * uprobe was consumed, so it's effectively NULL as far as * uretprobe processing logic is concerned */ return NULL; case HPROBE_LEASED: { struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe); /* * Try to switch hprobe state, guarding against * hprobe_consume() or another hprobe_expire() racing with us. * Note, if we failed to get uprobe refcount, we use special * HPROBE_GONE state to signal that hprobe->uprobe shouldn't * be used as it will be freed after SRCU is unlocked. */ if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) { /* We won the race, we are the ones to unlock SRCU */ __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); return get ? 
get_uprobe(uprobe) : uprobe; } /* * We lost the race, undo refcount bump (if it ever happened), * unless caller would like an extra refcount anyways. */ if (uprobe && !get) put_uprobe(uprobe); /* * Even if hprobe_consume() or another hprobe_expire() wins * the state update race and unlocks SRCU from under us, we * still have a guarantee that underyling uprobe won't be * freed due to ongoing caller's SRCU lock region, so we can * return it regardless. Also, if `get` was true, we also have * an extra ref for the caller to own. This is used in dup_utask(). */ return uprobe; } default: WARN(1, "unknown hprobe state %d", hstate); return NULL; } } static __always_inline int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, const struct uprobe *r) { if (l_inode < r->inode) return -1; if (l_inode > r->inode) return 1; if (l_offset < r->offset) return -1; if (l_offset > r->offset) return 1; return 0; } #define __node_2_uprobe(node) \ rb_entry((node), struct uprobe, rb_node) struct __uprobe_key { struct inode *inode; loff_t offset; }; static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b) { const struct __uprobe_key *a = key; return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b)); } static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b) { struct uprobe *u = __node_2_uprobe(a); return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b)); } /* * Assumes being inside RCU protected region. * No refcount is taken on returned uprobe. */ static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset) { struct __uprobe_key key = { .inode = inode, .offset = offset, }; struct rb_node *node; unsigned int seq; lockdep_assert(rcu_read_lock_trace_held()); do { seq = read_seqcount_begin(&uprobes_seqcount); node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key); /* * Lockless RB-tree lookups can result only in false negatives. * If the element is found, it is correct and can be returned * under RCU protection. If we find nothing, we need to * validate that seqcount didn't change. If it did, we have to * try again as we might have missed the element (false * negative). If seqcount is unchanged, search truly failed. */ if (node) return __node_2_uprobe(node); } while (read_seqcount_retry(&uprobes_seqcount, seq)); return NULL; } /* * Attempt to insert a new uprobe into uprobes_tree. * * If uprobe already exists (for given inode+offset), we just increment * refcount of previously existing uprobe. * * If not, a provided new instance of uprobe is inserted into the tree (with * assumed initial refcount == 1). * * In any case, we return a uprobe instance that ends up being in uprobes_tree. * Caller has to clean up new uprobe instance, if it ended up not being * inserted into the tree. * * We assume that uprobes_treelock is held for writing. */ static struct uprobe *__insert_uprobe(struct uprobe *uprobe) { struct rb_node *node; again: node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp); if (node) { struct uprobe *u = __node_2_uprobe(node); if (!try_get_uprobe(u)) { rb_erase(node, &uprobes_tree); RB_CLEAR_NODE(&u->rb_node); goto again; } return u; } return uprobe; } /* * Acquire uprobes_treelock and insert uprobe into uprobes_tree * (or reuse existing one, see __insert_uprobe() comments above). 
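 *
 * The write_seqcount_begin()/end() pair nested inside uprobes_treelock is
 * what lets find_uprobe_rcu() above perform its lockless lookup and retry
 * only when it races with a concurrent tree modification.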
*/ static struct uprobe *insert_uprobe(struct uprobe *uprobe) { struct uprobe *u; write_lock(&uprobes_treelock); write_seqcount_begin(&uprobes_seqcount); u = __insert_uprobe(uprobe); write_seqcount_end(&uprobes_seqcount); write_unlock(&uprobes_treelock); return u; } static void ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe) { pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx " "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n", uprobe->inode->i_ino, (unsigned long long) uprobe->offset, (unsigned long long) cur_uprobe->ref_ctr_offset, (unsigned long long) uprobe->ref_ctr_offset); } static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, loff_t ref_ctr_offset) { struct uprobe *uprobe, *cur_uprobe; uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); if (!uprobe) return ERR_PTR(-ENOMEM); uprobe->inode = inode; uprobe->offset = offset; uprobe->ref_ctr_offset = ref_ctr_offset; INIT_LIST_HEAD(&uprobe->consumers); init_rwsem(&uprobe->register_rwsem); init_rwsem(&uprobe->consumer_rwsem); RB_CLEAR_NODE(&uprobe->rb_node); refcount_set(&uprobe->ref, 1); /* add to uprobes_tree, sorted on inode:offset */ cur_uprobe = insert_uprobe(uprobe); /* a uprobe exists for this inode:offset combination */ if (cur_uprobe != uprobe) { if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) { ref_ctr_mismatch_warn(cur_uprobe, uprobe); put_uprobe(cur_uprobe); kfree(uprobe); return ERR_PTR(-EINVAL); } kfree(uprobe); uprobe = cur_uprobe; } return uprobe; } static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) { static atomic64_t id; down_write(&uprobe->consumer_rwsem); list_add_rcu(&uc->cons_node, &uprobe->consumers); uc->id = (__u64) atomic64_inc_return(&id); up_write(&uprobe->consumer_rwsem); } /* * For uprobe @uprobe, delete the consumer @uc. * Should never be called with consumer that's not part of @uprobe->consumers. */ static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) { down_write(&uprobe->consumer_rwsem); list_del_rcu(&uc->cons_node); up_write(&uprobe->consumer_rwsem); } static int __copy_insn(struct address_space *mapping, struct file *filp, void *insn, int nbytes, loff_t offset) { struct page *page; /* * Ensure that the page that has the original instruction is populated * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(), * see uprobe_register(). */ if (mapping->a_ops->read_folio) page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); else page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); if (IS_ERR(page)) return PTR_ERR(page); uprobe_copy_from_page(page, offset, insn, nbytes); put_page(page); return 0; } static int copy_insn(struct uprobe *uprobe, struct file *filp) { struct address_space *mapping = uprobe->inode->i_mapping; loff_t offs = uprobe->offset; void *insn = &uprobe->arch.insn; int size = sizeof(uprobe->arch.insn); int len, err = -EIO; /* Copy only available bytes, -EIO if nothing was read */ do { if (offs >= i_size_read(uprobe->inode)) break; len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK)); err = __copy_insn(mapping, filp, insn, len, offs); if (err) break; insn += len; offs += len; size -= len; } while (size); return err; } static int prepare_uprobe(struct uprobe *uprobe, struct file *file, struct mm_struct *mm, unsigned long vaddr) { int ret = 0; if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) return ret; /* TODO: move this into _register, until then we abuse this sem. 
*/ down_write(&uprobe->consumer_rwsem); if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) goto out; ret = copy_insn(uprobe, file); if (ret) goto out; ret = -ENOTSUPP; if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) goto out; ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); if (ret) goto out; smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ set_bit(UPROBE_COPY_INSN, &uprobe->flags); out: up_write(&uprobe->consumer_rwsem); return ret; } static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm) { return !uc->filter || uc->filter(uc, mm); } static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm) { struct uprobe_consumer *uc; bool ret = false; down_read(&uprobe->consumer_rwsem); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { ret = consumer_filter(uc, mm); if (ret) break; } up_read(&uprobe->consumer_rwsem); return ret; } static int install_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long vaddr) { struct mm_struct *mm = vma->vm_mm; bool first_uprobe; int ret; ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); if (ret) return ret; /* * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), * the task can hit this breakpoint right after __replace_page(). */ first_uprobe = !mm_flags_test(MMF_HAS_UPROBES, mm); if (first_uprobe) mm_flags_set(MMF_HAS_UPROBES, mm); ret = set_swbp(&uprobe->arch, vma, vaddr); if (!ret) mm_flags_clear(MMF_RECALC_UPROBES, mm); else if (first_uprobe) mm_flags_clear(MMF_HAS_UPROBES, mm); return ret; } static int remove_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long vaddr) { struct mm_struct *mm = vma->vm_mm; mm_flags_set(MMF_RECALC_UPROBES, mm); return set_orig_insn(&uprobe->arch, vma, vaddr); } struct map_info { struct map_info *next; struct mm_struct *mm; unsigned long vaddr; }; static inline struct map_info *free_map_info(struct map_info *info) { struct map_info *next = info->next; kfree(info); return next; } static struct map_info * build_map_info(struct address_space *mapping, loff_t offset, bool is_register) { unsigned long pgoff = offset >> PAGE_SHIFT; struct vm_area_struct *vma; struct map_info *curr = NULL; struct map_info *prev = NULL; struct map_info *info; int more = 0; again: i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { if (!valid_vma(vma, is_register)) continue; if (!prev && !more) { /* * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through * reclaim. This is optimistic, no harm done if it fails. 
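 *
 * If it does fail, "more" is counted up and the missing map_info entries
 * are allocated with GFP_KERNEL further down, after i_mmap_rwsem has been
 * dropped, before the walk is retried.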
*/ prev = kmalloc(sizeof(struct map_info), GFP_NOWAIT | __GFP_NOMEMALLOC); if (prev) prev->next = NULL; } if (!prev) { more++; continue; } if (!mmget_not_zero(vma->vm_mm)) continue; info = prev; prev = prev->next; info->next = curr; curr = info; info->mm = vma->vm_mm; info->vaddr = offset_to_vaddr(vma, offset); } i_mmap_unlock_read(mapping); if (!more) goto out; prev = curr; while (curr) { mmput(curr->mm); curr = curr->next; } do { info = kmalloc(sizeof(struct map_info), GFP_KERNEL); if (!info) { curr = ERR_PTR(-ENOMEM); goto out; } info->next = prev; prev = info; } while (--more); goto again; out: while (prev) prev = free_map_info(prev); return curr; } static int register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) { bool is_register = !!new; struct map_info *info; int err = 0; percpu_down_write(&dup_mmap_sem); info = build_map_info(uprobe->inode->i_mapping, uprobe->offset, is_register); if (IS_ERR(info)) { err = PTR_ERR(info); goto out; } while (info) { struct mm_struct *mm = info->mm; struct vm_area_struct *vma; if (err && is_register) goto free; /* * We take mmap_lock for writing to avoid the race with * find_active_uprobe_rcu() which takes mmap_lock for reading. * Thus this install_breakpoint() can not make * is_trap_at_addr() true right after find_uprobe_rcu() * returns NULL in find_active_uprobe_rcu(). */ mmap_write_lock(mm); if (check_stable_address_space(mm)) goto unlock; vma = find_vma(mm, info->vaddr); if (!vma || !valid_vma(vma, is_register) || file_inode(vma->vm_file) != uprobe->inode) goto unlock; if (vma->vm_start > info->vaddr || vaddr_to_offset(vma, info->vaddr) != uprobe->offset) goto unlock; if (is_register) { /* consult only the "caller", new consumer. */ if (consumer_filter(new, mm)) err = install_breakpoint(uprobe, vma, info->vaddr); } else if (mm_flags_test(MMF_HAS_UPROBES, mm)) { if (!filter_chain(uprobe, mm)) err |= remove_breakpoint(uprobe, vma, info->vaddr); } unlock: mmap_write_unlock(mm); free: mmput(mm); info = free_map_info(info); } out: percpu_up_write(&dup_mmap_sem); return err; } /** * uprobe_unregister_nosync - unregister an already registered probe. * @uprobe: uprobe to remove * @uc: identify which probe if multiple probes are colocated. */ void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc) { int err; down_write(&uprobe->register_rwsem); consumer_del(uprobe, uc); err = register_for_each_vma(uprobe, NULL); up_write(&uprobe->register_rwsem); /* TODO : cant unregister? schedule a worker thread */ if (unlikely(err)) { uprobe_warn(current, "unregister, leaking uprobe"); return; } put_uprobe(uprobe); } EXPORT_SYMBOL_GPL(uprobe_unregister_nosync); void uprobe_unregister_sync(void) { /* * Now that handler_chain() and handle_uretprobe_chain() iterate over * uprobe->consumers list under RCU protection without holding * uprobe->register_rwsem, we need to wait for RCU grace period to * make sure that we can't call into just unregistered * uprobe_consumer's callbacks anymore. If we don't do that, fast and * unlucky enough caller can free consumer's memory and cause * handler_chain() or handle_uretprobe_chain() to do an use-after-free. */ synchronize_rcu_tasks_trace(); synchronize_srcu(&uretprobes_srcu); } EXPORT_SYMBOL_GPL(uprobe_unregister_sync); /** * uprobe_register - register a probe * @inode: the file in which the probe has to be placed. * @offset: offset from the start of the file. * @ref_ctr_offset: offset of SDT marker / reference counter * @uc: information on howto handle the probe.. 
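 *
 * Rough sketch of the expected calling pattern (illustrative only, error
 * handling trimmed; @uc is assumed to be a fully initialized consumer):
 *
 *	uprobe = uprobe_register(inode, offset, ref_ctr_offset, &uc);
 *	if (IS_ERR(uprobe))
 *		return PTR_ERR(uprobe);
 *	...
 *	uprobe_unregister_nosync(uprobe, &uc);
 *	uprobe_unregister_sync();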
* * Apart from the access refcount, uprobe_register() takes a creation * refcount (thro alloc_uprobe) if and only if this @uprobe is getting * inserted into the rbtree (i.e first consumer for a @inode:@offset * tuple). Creation refcount stops uprobe_unregister from freeing the * @uprobe even before the register operation is complete. Creation * refcount is released when the last @uc for the @uprobe * unregisters. Caller of uprobe_register() is required to keep @inode * (and the containing mount) referenced. * * Return: pointer to the new uprobe on success or an ERR_PTR on failure. */ struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) { struct uprobe *uprobe; int ret; /* Uprobe must have at least one set consumer */ if (!uc->handler && !uc->ret_handler) return ERR_PTR(-EINVAL); /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ if (!inode->i_mapping->a_ops->read_folio && !shmem_mapping(inode->i_mapping)) return ERR_PTR(-EIO); /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) return ERR_PTR(-EINVAL); /* * This ensures that uprobe_copy_from_page(), copy_to_page() and * __update_ref_ctr() can't cross page boundary. */ if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) return ERR_PTR(-EINVAL); if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) return ERR_PTR(-EINVAL); uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); if (IS_ERR(uprobe)) return uprobe; down_write(&uprobe->register_rwsem); consumer_add(uprobe, uc); ret = register_for_each_vma(uprobe, uc); up_write(&uprobe->register_rwsem); if (ret) { uprobe_unregister_nosync(uprobe, uc); /* * Registration might have partially succeeded, so we can have * this consumer being called right at this time. We need to * sync here. It's ok, it's unlikely slow path. */ uprobe_unregister_sync(); return ERR_PTR(ret); } return uprobe; } EXPORT_SYMBOL_GPL(uprobe_register); /** * uprobe_apply - add or remove the breakpoints according to @uc->filter * @uprobe: uprobe which "owns" the breakpoint * @uc: consumer which wants to add more or remove some breakpoints * @add: add or remove the breakpoints * Return: 0 on success or negative error code. */ int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add) { struct uprobe_consumer *con; int ret = -ENOENT; down_write(&uprobe->register_rwsem); rcu_read_lock_trace(); list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { if (con == uc) { ret = register_for_each_vma(uprobe, add ? 
uc : NULL); break; } } rcu_read_unlock_trace(); up_write(&uprobe->register_rwsem); return ret; } static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) { VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; int err = 0; mmap_write_lock(mm); for_each_vma(vmi, vma) { unsigned long vaddr; loff_t offset; if (!valid_vma(vma, false) || file_inode(vma->vm_file) != uprobe->inode) continue; offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; if (uprobe->offset < offset || uprobe->offset >= offset + vma->vm_end - vma->vm_start) continue; vaddr = offset_to_vaddr(vma, uprobe->offset); err |= remove_breakpoint(uprobe, vma, vaddr); } mmap_write_unlock(mm); return err; } static struct rb_node * find_node_in_range(struct inode *inode, loff_t min, loff_t max) { struct rb_node *n = uprobes_tree.rb_node; while (n) { struct uprobe *u = rb_entry(n, struct uprobe, rb_node); if (inode < u->inode) { n = n->rb_left; } else if (inode > u->inode) { n = n->rb_right; } else { if (max < u->offset) n = n->rb_left; else if (min > u->offset) n = n->rb_right; else break; } } return n; } /* * For a given range in vma, build a list of probes that need to be inserted. */ static void build_probe_list(struct inode *inode, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *head) { loff_t min, max; struct rb_node *n, *t; struct uprobe *u; INIT_LIST_HEAD(head); min = vaddr_to_offset(vma, start); max = min + (end - start) - 1; read_lock(&uprobes_treelock); n = find_node_in_range(inode, min, max); if (n) { for (t = n; t; t = rb_prev(t)) { u = rb_entry(t, struct uprobe, rb_node); if (u->inode != inode || u->offset < min) break; /* if uprobe went away, it's safe to ignore it */ if (try_get_uprobe(u)) list_add(&u->pending_list, head); } for (t = n; (t = rb_next(t)); ) { u = rb_entry(t, struct uprobe, rb_node); if (u->inode != inode || u->offset > max) break; /* if uprobe went away, it's safe to ignore it */ if (try_get_uprobe(u)) list_add(&u->pending_list, head); } } read_unlock(&uprobes_treelock); } /* @vma contains reference counter, not the probed instruction. */ static int delayed_ref_ctr_inc(struct vm_area_struct *vma) { struct list_head *pos, *q; struct delayed_uprobe *du; unsigned long vaddr; int ret = 0, err = 0; mutex_lock(&delayed_uprobe_lock); list_for_each_safe(pos, q, &delayed_uprobe_list) { du = list_entry(pos, struct delayed_uprobe, list); if (du->mm != vma->vm_mm || !valid_ref_ctr_vma(du->uprobe, vma)) continue; vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); if (ret) { update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); if (!err) err = ret; } delayed_uprobe_delete(du); } mutex_unlock(&delayed_uprobe_lock); return err; } /* * Called from mmap_region/vma_merge with mm->mmap_lock acquired. * * Currently we ignore all errors and always return 0, the callers * can't handle the failure anyway. */ int uprobe_mmap(struct vm_area_struct *vma) { struct list_head tmp_list; struct uprobe *uprobe, *u; struct inode *inode; if (no_uprobe_events()) return 0; if (vma->vm_file && (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm)) delayed_ref_ctr_inc(vma); if (!valid_vma(vma, true)) return 0; inode = file_inode(vma->vm_file); if (!inode) return 0; mutex_lock(uprobes_mmap_hash(inode)); build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); /* * We can race with uprobe_unregister(), this uprobe can be already * removed. 
But in this case filter_chain() must return false, all * consumers have gone away. */ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { if (!fatal_signal_pending(current) && filter_chain(uprobe, vma->vm_mm)) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); install_breakpoint(uprobe, vma, vaddr); } put_uprobe(uprobe); } mutex_unlock(uprobes_mmap_hash(inode)); return 0; } static bool vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) { loff_t min, max; struct inode *inode; struct rb_node *n; inode = file_inode(vma->vm_file); min = vaddr_to_offset(vma, start); max = min + (end - start) - 1; read_lock(&uprobes_treelock); n = find_node_in_range(inode, min, max); read_unlock(&uprobes_treelock); return !!n; } /* * Called in context of a munmap of a vma. */ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (no_uprobe_events() || !valid_vma(vma, false)) return; if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ return; if (!mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm) || mm_flags_test(MMF_RECALC_UPROBES, vma->vm_mm)) return; if (vma_has_uprobes(vma, start, end)) mm_flags_set(MMF_RECALC_UPROBES, vma->vm_mm); } static vm_fault_t xol_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { struct xol_area *area = vma->vm_mm->uprobes_state.xol_area; vmf->page = area->page; get_page(vmf->page); return 0; } static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { return -EPERM; } static const struct vm_special_mapping xol_mapping = { .name = "[uprobes]", .fault = xol_fault, .mremap = xol_mremap, }; /* Slot allocation for XOL */ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) { struct vm_area_struct *vma; int ret; if (mmap_write_lock_killable(mm)) return -EINTR; if (mm->uprobes_state.xol_area) { ret = -EALREADY; goto fail; } if (!area->vaddr) { /* Try to map as high as possible, this is only a hint. */ area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(area->vaddr)) { ret = area->vaddr; goto fail; } } vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO| VM_SEALED_SYSMAP, &xol_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto fail; } ret = 0; /* pairs with get_xol_area() */ smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ fail: mmap_write_unlock(mm); return ret; } void * __weak arch_uretprobe_trampoline(unsigned long *psize) { static uprobe_opcode_t insn = UPROBE_SWBP_INSN; *psize = UPROBE_SWBP_INSN_SIZE; return &insn; } static struct xol_area *__create_xol_area(unsigned long vaddr) { struct mm_struct *mm = current->mm; unsigned long insns_size; struct xol_area *area; void *insns; area = kzalloc(sizeof(*area), GFP_KERNEL); if (unlikely(!area)) goto out; area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), GFP_KERNEL); if (!area->bitmap) goto free_area; area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); if (!area->page) goto free_bitmap; area->vaddr = vaddr; init_waitqueue_head(&area->wq); /* Reserve the 1st slot for get_trampoline_vaddr() */ set_bit(0, area->bitmap); insns = arch_uretprobe_trampoline(&insns_size); arch_uprobe_copy_ixol(area->page, 0, insns, insns_size); if (!xol_add_vma(mm, area)) return area; __free_page(area->page); free_bitmap: kfree(area->bitmap); free_area: kfree(area); out: return NULL; } /* * get_xol_area - Allocate process's xol_area if necessary. 
* This area will be used for storing instructions for execution out of line. * * Returns the allocated area or NULL. */ static struct xol_area *get_xol_area(void) { struct mm_struct *mm = current->mm; struct xol_area *area; if (!mm->uprobes_state.xol_area) __create_xol_area(0); /* Pairs with xol_add_vma() smp_store_release() */ area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ return area; } void __weak arch_uprobe_clear_state(struct mm_struct *mm) { } void __weak arch_uprobe_init_state(struct mm_struct *mm) { } /* * uprobe_clear_state - Free the area allocated for slots. */ void uprobe_clear_state(struct mm_struct *mm) { struct xol_area *area = mm->uprobes_state.xol_area; mutex_lock(&delayed_uprobe_lock); delayed_uprobe_remove(NULL, mm); mutex_unlock(&delayed_uprobe_lock); arch_uprobe_clear_state(mm); if (!area) return; put_page(area->page); kfree(area->bitmap); kfree(area); } void uprobe_start_dup_mmap(void) { percpu_down_read(&dup_mmap_sem); } void uprobe_end_dup_mmap(void) { percpu_up_read(&dup_mmap_sem); } void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) { if (mm_flags_test(MMF_HAS_UPROBES, oldmm)) { mm_flags_set(MMF_HAS_UPROBES, newmm); /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ mm_flags_set(MMF_RECALC_UPROBES, newmm); } } static unsigned long xol_get_slot_nr(struct xol_area *area) { unsigned long slot_nr; slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); if (slot_nr < UINSNS_PER_PAGE) { if (!test_and_set_bit(slot_nr, area->bitmap)) return slot_nr; } return UINSNS_PER_PAGE; } /* * xol_get_insn_slot - allocate a slot for xol. */ static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask) { struct xol_area *area = get_xol_area(); unsigned long slot_nr; if (!area) return false; wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE); utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES; arch_uprobe_copy_ixol(area->page, utask->xol_vaddr, &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); return true; } /* * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot() */ static void xol_free_insn_slot(struct uprobe_task *utask) { struct xol_area *area = current->mm->uprobes_state.xol_area; unsigned long offset = utask->xol_vaddr - area->vaddr; unsigned int slot_nr; utask->xol_vaddr = 0; /* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */ if (WARN_ON_ONCE(offset >= PAGE_SIZE)) return; slot_nr = offset / UPROBE_XOL_SLOT_BYTES; clear_bit(slot_nr, area->bitmap); smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ if (waitqueue_active(&area->wq)) wake_up(&area->wq); } void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { /* Initialize the slot */ copy_to_page(page, vaddr, src, len); /* * We probably need flush_icache_user_page() but it needs vma. * This should work on most of architectures by default. If * architecture needs to do something different it can define * its own version of the function. */ flush_dcache_page(page); } /** * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs * @regs: Reflects the saved state of the task after it has hit a breakpoint * instruction. * Return the address of the breakpoint instruction. 
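 *
 * For example, on an architecture whose breakpoint is a single byte (such
 * as x86's int3, where UPROBE_SWBP_INSN_SIZE is 1), this amounts to
 * instruction_pointer(regs) - 1, since the trap leaves the IP pointing
 * just past the breakpoint.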
*/ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) { return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; } unsigned long uprobe_get_trap_addr(struct pt_regs *regs) { struct uprobe_task *utask = current->utask; if (unlikely(utask && utask->active_uprobe)) return utask->vaddr; return instruction_pointer(regs); } static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri) { ri->cons_cnt = 0; ri->next = utask->ri_pool; utask->ri_pool = ri; } static struct return_instance *ri_pool_pop(struct uprobe_task *utask) { struct return_instance *ri = utask->ri_pool; if (likely(ri)) utask->ri_pool = ri->next; return ri; } static void ri_free(struct return_instance *ri) { kfree(ri->extra_consumers); kfree_rcu(ri, rcu); } static void free_ret_instance(struct uprobe_task *utask, struct return_instance *ri, bool cleanup_hprobe) { unsigned seq; if (cleanup_hprobe) { enum hprobe_state hstate; (void)hprobe_consume(&ri->hprobe, &hstate); hprobe_finalize(&ri->hprobe, hstate); } /* * At this point return_instance is unlinked from utask's * return_instances list and this has become visible to ri_timer(). * If seqcount now indicates that ri_timer's return instance * processing loop isn't active, we can return ri into the pool of * to-be-reused return instances for future uretprobes. If ri_timer() * happens to be running right now, though, we fallback to safety and * just perform RCU-delated freeing of ri. * Admittedly, this is a rather simple use of seqcount, but it nicely * abstracts away all the necessary memory barriers, so we use * a well-supported kernel primitive here. */ if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) { /* immediate reuse of ri without RCU GP is OK */ ri_pool_push(utask, ri); } else { /* we might be racing with ri_timer(), so play it safe */ ri_free(ri); } } /* * Called with no locks held. * Called in context of an exiting or an exec-ing thread. */ void uprobe_free_utask(struct task_struct *t) { struct uprobe_task *utask = t->utask; struct return_instance *ri, *ri_next; if (!utask) return; t->utask = NULL; WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr); timer_delete_sync(&utask->ri_timer); ri = utask->return_instances; while (ri) { ri_next = ri->next; free_ret_instance(utask, ri, true /* cleanup_hprobe */); ri = ri_next; } /* free_ret_instance() above might add to ri_pool, so this loop should come last */ ri = utask->ri_pool; while (ri) { ri_next = ri->next; ri_free(ri); ri = ri_next; } kfree(utask); } #define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */ #define for_each_ret_instance_rcu(pos, head) \ for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next)) static void ri_timer(struct timer_list *timer) { struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer); struct return_instance *ri; /* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */ guard(srcu)(&uretprobes_srcu); /* RCU protects return_instance from freeing. */ guard(rcu)(); /* * See free_ret_instance() for notes on seqcount use. * We also employ raw API variants to avoid lockdep false-positive * warning complaining about enabled preemption. The timer can only be * invoked once for a uprobe_task. Therefore there can only be one * writer. The reader does not require an even sequence count to make * progress, so it is OK to remain preemptible on PREEMPT_RT. 
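 *
 * The counterpart reader is free_ret_instance() above, which uses
 * raw_seqcount_try_begin() to decide whether a return_instance may be
 * recycled into the pool immediately or must go through RCU-delayed
 * freeing instead.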
*/ raw_write_seqcount_begin(&utask->ri_seqcount); for_each_ret_instance_rcu(ri, utask->return_instances) hprobe_expire(&ri->hprobe, false); raw_write_seqcount_end(&utask->ri_seqcount); } static struct uprobe_task *alloc_utask(void) { struct uprobe_task *utask; utask = kzalloc(sizeof(*utask), GFP_KERNEL); if (!utask) return NULL; timer_setup(&utask->ri_timer, ri_timer, 0); seqcount_init(&utask->ri_seqcount); return utask; } /* * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. * * Returns: * - pointer to new uprobe_task on success * - NULL otherwise */ static struct uprobe_task *get_utask(void) { if (!current->utask) current->utask = alloc_utask(); return current->utask; } static struct return_instance *alloc_return_instance(struct uprobe_task *utask) { struct return_instance *ri; ri = ri_pool_pop(utask); if (ri) return ri; ri = kzalloc(sizeof(*ri), GFP_KERNEL); if (!ri) return ZERO_SIZE_PTR; return ri; } static struct return_instance *dup_return_instance(struct return_instance *old) { struct return_instance *ri; ri = kmemdup(old, sizeof(*ri), GFP_KERNEL); if (!ri) return NULL; if (unlikely(old->cons_cnt > 1)) { ri->extra_consumers = kmemdup(old->extra_consumers, sizeof(ri->extra_consumers[0]) * (old->cons_cnt - 1), GFP_KERNEL); if (!ri->extra_consumers) { kfree(ri); return NULL; } } return ri; } static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) { struct uprobe_task *n_utask; struct return_instance **p, *o, *n; struct uprobe *uprobe; n_utask = alloc_utask(); if (!n_utask) return -ENOMEM; t->utask = n_utask; /* protect uprobes from freeing, we'll need try_get_uprobe() them */ guard(srcu)(&uretprobes_srcu); p = &n_utask->return_instances; for (o = o_utask->return_instances; o; o = o->next) { n = dup_return_instance(o); if (!n) return -ENOMEM; /* if uprobe is non-NULL, we'll have an extra refcount for uprobe */ uprobe = hprobe_expire(&o->hprobe, true); /* * New utask will have stable properly refcounted uprobe or * NULL. Even if we failed to get refcounted uprobe, we still * need to preserve full set of return_instances for proper * uretprobe handling and nesting in forked task. */ hprobe_init_stable(&n->hprobe, uprobe); n->next = NULL; rcu_assign_pointer(*p, n); p = &n->next; n_utask->depth++; } return 0; } static void dup_xol_work(struct callback_head *work) { if (current->flags & PF_EXITING) return; if (!__create_xol_area(current->utask->dup_xol_addr) && !fatal_signal_pending(current)) uprobe_warn(current, "dup xol area"); } /* * Called in context of a new clone/fork from copy_process. */ void uprobe_copy_process(struct task_struct *t, u64 flags) { struct uprobe_task *utask = current->utask; struct mm_struct *mm = current->mm; struct xol_area *area; t->utask = NULL; if (!utask || !utask->return_instances) return; if (mm == t->mm && !(flags & CLONE_VFORK)) return; if (dup_utask(t, utask)) return uprobe_warn(t, "dup ret instances"); /* The task can fork() after dup_xol_work() fails */ area = mm->uprobes_state.xol_area; if (!area) return uprobe_warn(t, "dup xol area"); if (mm == t->mm) return; t->utask->dup_xol_addr = area->vaddr; init_task_work(&t->utask->dup_xol_work, dup_xol_work); task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); } /* * Current area->vaddr notion assume the trampoline address is always * equal area->vaddr. * * Returns -1 in case the xol_area is not allocated. 
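 *
 * ("-1" corresponds to the UPROBE_NO_TRAMPOLINE_VADDR value the function
 * below falls back to when no xol_area has been set up yet.)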
*/ unsigned long uprobe_get_trampoline_vaddr(void) { unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR; struct xol_area *area; /* Pairs with xol_add_vma() smp_store_release() */ area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ if (area) trampoline_vaddr = area->vaddr; return trampoline_vaddr; } static void cleanup_return_instances(struct uprobe_task *utask, bool chained, struct pt_regs *regs) { struct return_instance *ri = utask->return_instances, *ri_next; enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { ri_next = ri->next; rcu_assign_pointer(utask->return_instances, ri_next); utask->depth--; free_ret_instance(utask, ri, true /* cleanup_hprobe */); ri = ri_next; } } static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, struct return_instance *ri) { struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; bool chained; int srcu_idx; if (!get_xol_area()) goto free; if (utask->depth >= MAX_URETPROBE_DEPTH) { printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" " nestedness limit pid/tgid=%d/%d\n", current->pid, current->tgid); goto free; } trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) goto free; /* drop the entries invalidated by longjmp() */ chained = (orig_ret_vaddr == trampoline_vaddr); cleanup_return_instances(utask, chained, regs); /* * We don't want to keep trampoline address in stack, rather keep the * original return address of first caller thru all the consequent * instances. This also makes breakpoint unwrapping easier. */ if (chained) { if (!utask->return_instances) { /* * This situation is not possible. Likely we have an * attack from user-space. */ uprobe_warn(current, "handle tail call"); goto free; } orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; } /* __srcu_read_lock() because SRCU lock survives switch to user space */ srcu_idx = __srcu_read_lock(&uretprobes_srcu); ri->func = instruction_pointer(regs); ri->stack = user_stack_pointer(regs); ri->orig_ret_vaddr = orig_ret_vaddr; ri->chained = chained; utask->depth++; hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx); ri->next = utask->return_instances; rcu_assign_pointer(utask->return_instances, ri); mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD); return; free: ri_free(ri); } /* Prepare to single-step probed instruction out of line. */ static int pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) { struct uprobe_task *utask = current->utask; int err; if (!try_get_uprobe(uprobe)) return -EINVAL; if (!xol_get_insn_slot(uprobe, utask)) { err = -ENOMEM; goto err_out; } utask->vaddr = bp_vaddr; err = arch_uprobe_pre_xol(&uprobe->arch, regs); if (unlikely(err)) { xol_free_insn_slot(utask); goto err_out; } utask->active_uprobe = uprobe; utask->state = UTASK_SSTEP; return 0; err_out: put_uprobe(uprobe); return err; } /* * If we are singlestepping, then ensure this thread is not connected to * non-fatal signals until completion of singlestep. When xol insn itself * triggers the signal, restart the original insn even if the task is * already SIGKILL'ed (since coredump should report the correct ip). This * is even more important if the task has a handler for SIGSEGV/etc, The * _same_ instruction should be repeated again after return from the signal * handler, and SSTEP can never finish in this case. 
*/ bool uprobe_deny_signal(void) { struct task_struct *t = current; struct uprobe_task *utask = t->utask; if (likely(!utask || !utask->active_uprobe)) return false; WARN_ON_ONCE(utask->state != UTASK_SSTEP); if (task_sigpending(t)) { utask->signal_denied = true; clear_tsk_thread_flag(t, TIF_SIGPENDING); if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { utask->state = UTASK_SSTEP_TRAPPED; set_tsk_thread_flag(t, TIF_UPROBE); } } return true; } static void mmf_recalc_uprobes(struct mm_struct *mm) { VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; for_each_vma(vmi, vma) { if (!valid_vma(vma, false)) continue; /* * This is not strictly accurate, we can race with * uprobe_unregister() and see the already removed * uprobe if delete_uprobe() was not yet called. * Or this uprobe can be filtered out. */ if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) return; } mm_flags_clear(MMF_HAS_UPROBES, mm); } static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) { struct page *page; uprobe_opcode_t opcode; int result; if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) return -EINVAL; pagefault_disable(); result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); pagefault_enable(); if (likely(result == 0)) goto out; result = get_user_pages(vaddr, 1, FOLL_FORCE, &page); if (result < 0) return result; uprobe_copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); put_page(page); out: /* This needs to return true for any variant of the trap insn */ return is_trap_insn(&opcode); } static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr) { struct mm_struct *mm = current->mm; struct uprobe *uprobe = NULL; struct vm_area_struct *vma; struct file *vm_file; loff_t offset; unsigned int seq; guard(rcu)(); if (!mmap_lock_speculate_try_begin(mm, &seq)) return NULL; vma = vma_lookup(mm, bp_vaddr); if (!vma) return NULL; /* * vm_file memory can be reused for another instance of struct file, * but can't be freed from under us, so it's safe to read fields from * it, even if the values are some garbage values; ultimately * find_uprobe_rcu() + mmap_lock_speculation_end() check will ensure * that whatever we speculatively found is correct */ vm_file = READ_ONCE(vma->vm_file); if (!vm_file) return NULL; offset = (loff_t)(vma->vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vma->vm_start); uprobe = find_uprobe_rcu(vm_file->f_inode, offset); if (!uprobe) return NULL; /* now double check that nothing about MM changed */ if (mmap_lock_speculate_retry(mm, seq)) return NULL; return uprobe; } /* assumes being inside RCU protected region */ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp) { struct mm_struct *mm = current->mm; struct uprobe *uprobe = NULL; struct vm_area_struct *vma; uprobe = find_active_uprobe_speculative(bp_vaddr); if (uprobe) return uprobe; mmap_read_lock(mm); vma = vma_lookup(mm, bp_vaddr); if (vma) { if (vma->vm_file) { struct inode *inode = file_inode(vma->vm_file); loff_t offset = vaddr_to_offset(vma, bp_vaddr); uprobe = find_uprobe_rcu(inode, offset); } if (!uprobe) *is_swbp = is_trap_at_addr(mm, bp_vaddr); } else { *is_swbp = -EFAULT; } if (!uprobe && mm_flags_test_and_clear(MMF_RECALC_UPROBES, mm)) mmf_recalc_uprobes(mm); mmap_read_unlock(mm); return uprobe; } static struct return_instance *push_consumer(struct return_instance *ri, __u64 id, __u64 cookie) { struct return_consumer *ric; if (unlikely(ri == ZERO_SIZE_PTR)) return ri; if (unlikely(ri->cons_cnt > 0)) { ric = krealloc(ri->extra_consumers, 
sizeof(*ric) * ri->cons_cnt, GFP_KERNEL); if (!ric) { ri_free(ri); return ZERO_SIZE_PTR; } ri->extra_consumers = ric; } ric = likely(ri->cons_cnt == 0) ? &ri->consumer : &ri->extra_consumers[ri->cons_cnt - 1]; ric->id = id; ric->cookie = cookie; ri->cons_cnt++; return ri; } static struct return_consumer * return_consumer_find(struct return_instance *ri, int *iter, int id) { struct return_consumer *ric; int idx; for (idx = *iter; idx < ri->cons_cnt; idx++) { ric = likely(idx == 0) ? &ri->consumer : &ri->extra_consumers[idx - 1]; if (ric->id == id) { *iter = idx + 1; return ric; } } return NULL; } static bool ignore_ret_handler(int rc) { return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE; } static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) { struct uprobe_consumer *uc; bool has_consumers = false, remove = true; struct return_instance *ri = NULL; struct uprobe_task *utask = current->utask; utask->auprobe = &uprobe->arch; list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { bool session = uc->handler && uc->ret_handler; __u64 cookie = 0; int rc = 0; if (uc->handler) { rc = uc->handler(uc, regs, &cookie); WARN(rc < 0 || rc > 2, "bad rc=0x%x from %ps()\n", rc, uc->handler); } remove &= rc == UPROBE_HANDLER_REMOVE; has_consumers = true; if (!uc->ret_handler || ignore_ret_handler(rc)) continue; if (!ri) ri = alloc_return_instance(utask); if (session) ri = push_consumer(ri, uc->id, cookie); } utask->auprobe = NULL; if (!ZERO_OR_NULL_PTR(ri)) prepare_uretprobe(uprobe, regs, ri); if (remove && has_consumers) { down_read(&uprobe->register_rwsem); /* re-check that removal is still required, this time under lock */ if (!filter_chain(uprobe, current->mm)) { WARN_ON(!uprobe_is_active(uprobe)); unapply_uprobe(uprobe, current->mm); } up_read(&uprobe->register_rwsem); } } static void handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs) { struct return_consumer *ric; struct uprobe_consumer *uc; int ric_idx = 0; /* all consumers unsubscribed meanwhile */ if (unlikely(!uprobe)) return; rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { bool session = uc->handler && uc->ret_handler; if (uc->ret_handler) { ric = return_consumer_find(ri, &ric_idx, uc->id); if (!session || ric) uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL); } } rcu_read_unlock_trace(); } static struct return_instance *find_next_ret_chain(struct return_instance *ri) { bool chained; do { chained = ri->chained; ri = ri->next; /* can't be NULL if chained */ } while (chained); return ri; } void uprobe_handle_trampoline(struct pt_regs *regs) { struct uprobe_task *utask; struct return_instance *ri, *ri_next, *next_chain; struct uprobe *uprobe; enum hprobe_state hstate; bool valid; utask = current->utask; if (!utask) goto sigill; ri = utask->return_instances; if (!ri) goto sigill; do { /* * We should throw out the frames invalidated by longjmp(). * If this chain is valid, then the next one should be alive * or NULL; the latter case means that nobody but ri->func * could hit this trampoline on return. TODO: sigaltstack(). 
*/ next_chain = find_next_ret_chain(ri); valid = !next_chain || arch_uretprobe_is_alive(next_chain, RP_CHECK_RET, regs); instruction_pointer_set(regs, ri->orig_ret_vaddr); do { /* pop current instance from the stack of pending return instances, * as it's not pending anymore: we just fixed up original * instruction pointer in regs and are about to call handlers; * this allows fixup_uretprobe_trampoline_entries() to properly fix up * captured stack traces from uretprobe handlers, in which pending * trampoline addresses on the stack are replaced with correct * original return addresses */ ri_next = ri->next; rcu_assign_pointer(utask->return_instances, ri_next); utask->depth--; uprobe = hprobe_consume(&ri->hprobe, &hstate); if (valid) handle_uretprobe_chain(ri, uprobe, regs); hprobe_finalize(&ri->hprobe, hstate); /* We already took care of hprobe, no need to waste more time on that. */ free_ret_instance(utask, ri, false /* !cleanup_hprobe */); ri = ri_next; } while (ri != next_chain); } while (!valid); return; sigill: uprobe_warn(current, "handle uretprobe, sending SIGILL."); force_sig(SIGILL); } bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) { return false; } bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs) { return true; } void __weak arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr) { } /* * Run handler and ask thread to singlestep. * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. */ static void handle_swbp(struct pt_regs *regs) { struct uprobe *uprobe; unsigned long bp_vaddr; int is_swbp; bp_vaddr = uprobe_get_swbp_addr(regs); if (bp_vaddr == uprobe_get_trampoline_vaddr()) return uprobe_handle_trampoline(regs); rcu_read_lock_trace(); uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. */ force_sig(SIGTRAP); } else { /* * Either we raced with uprobe_unregister() or we can't * access this memory. The latter is only possible if * another thread plays with our ->mm. In both cases * we can simply restart. If this vma was unmapped we * can pretend this insn was not executed yet and get * the (correct) SIGSEGV after restart. */ instruction_pointer_set(regs, bp_vaddr); } goto out; } /* change it in advance for ->handler() and restart */ instruction_pointer_set(regs, bp_vaddr); /* * TODO: move copy_insn/etc into _register and remove this hack. * After we hit the bp, _unregister + _register can install the * new and not-yet-analyzed uprobe at the same address, restart. */ if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) goto out; /* * Pairs with the smp_wmb() in prepare_uprobe(). * * Guarantees that if we see the UPROBE_COPY_INSN bit set, then * we must also see the stores to &uprobe->arch performed by the * prepare_uprobe() call. */ smp_rmb(); /* Tracing handlers use ->utask to communicate with fetch methods */ if (!get_utask()) goto out; if (arch_uprobe_ignore(&uprobe->arch, regs)) goto out; handler_chain(uprobe, regs); /* Try to optimize after first hit. */ arch_uprobe_optimize(&uprobe->arch, bp_vaddr); /* * If user decided to take execution elsewhere, it makes little sense * to execute the original instruction, so let's skip it. 
*/ if (instruction_pointer(regs) != bp_vaddr) goto out; if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) goto out; if (pre_ssout(uprobe, regs, bp_vaddr)) goto out; out: /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ rcu_read_unlock_trace(); } void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr) { struct uprobe *uprobe; int is_swbp; guard(rcu_tasks_trace)(); uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); if (!uprobe) return; if (!get_utask()) return; if (arch_uprobe_ignore(&uprobe->arch, regs)) return; handler_chain(uprobe, regs); } /* * Perform required fix-ups and disable singlestep. * Allow pending signals to take effect. */ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) { struct uprobe *uprobe; int err = 0; uprobe = utask->active_uprobe; if (utask->state == UTASK_SSTEP_ACK) err = arch_uprobe_post_xol(&uprobe->arch, regs); else if (utask->state == UTASK_SSTEP_TRAPPED) arch_uprobe_abort_xol(&uprobe->arch, regs); else WARN_ON_ONCE(1); put_uprobe(uprobe); utask->active_uprobe = NULL; utask->state = UTASK_RUNNING; xol_free_insn_slot(utask); if (utask->signal_denied) { set_thread_flag(TIF_SIGPENDING); utask->signal_denied = false; } if (unlikely(err)) { uprobe_warn(current, "execute the probed insn, sending SIGILL."); force_sig(SIGILL); } } /* * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and * allows the thread to return from interrupt. After that handle_swbp() * sets utask->active_uprobe. * * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag * and allows the thread to return from interrupt. * * While returning to userspace, thread notices the TIF_UPROBE flag and calls * uprobe_notify_resume(). */ void uprobe_notify_resume(struct pt_regs *regs) { struct uprobe_task *utask; clear_thread_flag(TIF_UPROBE); utask = current->utask; if (utask && utask->active_uprobe) handle_singlestep(utask, regs); else handle_swbp(regs); } /* * uprobe_pre_sstep_notifier gets called from interrupt context as part of * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit. */ int uprobe_pre_sstep_notifier(struct pt_regs *regs) { if (!current->mm) return 0; if (!mm_flags_test(MMF_HAS_UPROBES, current->mm) && (!current->utask || !current->utask->return_instances)) return 0; set_thread_flag(TIF_UPROBE); return 1; } /* * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep. */ int uprobe_post_sstep_notifier(struct pt_regs *regs) { struct uprobe_task *utask = current->utask; if (!current->mm || !utask || !utask->active_uprobe) /* task is currently not uprobed */ return 0; utask->state = UTASK_SSTEP_ACK; set_thread_flag(TIF_UPROBE); return 1; } static struct notifier_block uprobe_exception_nb = { .notifier_call = arch_uprobe_exception_notify, .priority = INT_MAX-1, /* notified after kprobes, kgdb */ }; void __init uprobes_init(void) { int i; for (i = 0; i < UPROBES_HASH_SZ; i++) mutex_init(&uprobes_mmap_mutex[i]); BUG_ON(register_die_notifier(&uprobe_exception_nb)); }
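/*
 * Illustrative sketch, not part of the file above: a minimal "session"
 * consumer, i.e. one that supplies both a handler and a ret_handler so that
 * handler_chain() records it in a return_instance and
 * handle_uretprobe_chain() hands the per-hit cookie back on function return.
 * The callback signatures follow from the call sites above; the
 * registration helpers live in include/linux/uprobes.h and their exact
 * signatures have changed across kernel versions, so attach/detach is only
 * described in the trailing comment rather than spelled out.
 */
#include <linux/uprobes.h>
#include <linux/printk.h>
#include <linux/ptrace.h>

/* Entry side: rc 0 keeps the probe; UPROBE_HANDLER_REMOVE and
 * UPROBE_HANDLER_IGNORE are the other values handler_chain() accepts. */
static int demo_entry(struct uprobe_consumer *uc, struct pt_regs *regs,
		      __u64 *cookie)
{
	*cookie = instruction_pointer(regs);	/* stashed in the return_instance */
	return 0;
}

/* Exit side: @func is ri->func, the probed function's entry address. */
static int demo_exit(struct uprobe_consumer *uc, unsigned long func,
		     struct pt_regs *regs, __u64 *cookie)
{
	pr_debug("return from %lx, cookie %llx\n", func, cookie ? *cookie : 0);
	return 0;
}

static struct uprobe_consumer demo_consumer = {
	.handler	= demo_entry,
	.ret_handler	= demo_exit,
};

/*
 * Attaching is then a matter of resolving the target to an (inode, offset)
 * pair and registering demo_consumer with uprobe_register(); on recent
 * kernels that returns a struct uprobe * which is later handed to the
 * unregister helpers (assumed here -- check uprobes.h of the running kernel
 * for the exact signatures).
 */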
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Tascam US-X2Y USB soundcards
 *
 * FPGA Loader + ALSA Startup
 *
 * Copyright (c) 2003 by Karsten Wiese <annabellesgarden@yahoo.de>
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/memalloc.h>
#include <sound/pcm.h>
#include <sound/hwdep.h>
#include "usx2y.h"
#include "usbusx2y.h"
#include "usX2Yhwdep.h"

static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	void *vaddr;

	offset = vmf->pgoff << PAGE_SHIFT;
	vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
	page = virt_to_page(vaddr);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct us428ctls_vm_ops = {
	.fault = snd_us428ctls_vm_fault,
};

static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp, struct vm_area_struct *area)
{
	unsigned long size = (unsigned long)(area->vm_end - area->vm_start);
	struct usx2ydev *us428 = hw->private_data;

	// FIXME this hwdep interface is used twice: fpga download and mmap for controlling Lights etc. Maybe better using 2 hwdep devs?
	// so as long as the device isn't fully initialised yet we return -EBUSY here.
	if (!(us428->chip_status & USX2Y_STAT_CHIP_INIT))
		return -EBUSY;

	/* if userspace tries to mmap beyond end of our buffer, fail */
	if (size > US428_SHAREDMEM_PAGES) {
		dev_dbg(hw->card->dev, "%s: mmap size %lu > %lu\n",
			__func__, size, (unsigned long)US428_SHAREDMEM_PAGES);
		return -EINVAL;
	}

	area->vm_ops = &us428ctls_vm_ops;
	vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
	area->vm_private_data = hw->private_data;
	return 0;
}

static __poll_t snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct usx2ydev *us428 = hw->private_data;
	struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;

	if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
		return EPOLLHUP;

	poll_wait(file, &us428->us428ctls_wait_queue_head, wait);

	if (shm && shm->ctl_snapshot_last != shm->ctl_snapshot_red)
		mask |= EPOLLIN;

	return mask;
}

static int snd_usx2y_hwdep_dsp_status(struct snd_hwdep *hw,
				      struct snd_hwdep_dsp_status *info)
{
	static const char * const type_ids[USX2Y_TYPE_NUMS] = {
		[USX2Y_TYPE_122] = "us122",
		[USX2Y_TYPE_224] = "us224",
		[USX2Y_TYPE_428] = "us428",
	};
	struct usx2ydev *us428 = hw->private_data;
	int id = -1;

	switch (le16_to_cpu(us428->dev->descriptor.idProduct)) {
	case USB_ID_US122:
		id = USX2Y_TYPE_122;
		break;
	case USB_ID_US224:
		id = USX2Y_TYPE_224;
		break;
	case USB_ID_US428:
		id = USX2Y_TYPE_428;
		break;
	}
	if (id < 0)
		return -ENODEV;
	strscpy(info->id, type_ids[id]);
	info->num_dsps = 2;		// 0: Prepad Data, 1: FPGA Code
	if (us428->chip_status & USX2Y_STAT_CHIP_INIT)
		info->chip_ready = 1;
	info->version = USX2Y_DRIVER_VERSION;
	return 0;
}

static int usx2y_create_usbmidi(struct snd_card *card)
{
	static const struct snd_usb_midi_endpoint_info quirk_data_1 = {
		.out_ep = 0x06,
		.in_ep = 0x06,
		.out_cables = 0x001,
		.in_cables = 0x001
	};
	static const struct snd_usb_audio_quirk quirk_1 = {
		.vendor_name = "TASCAM",
		.product_name = NAME_ALLCAPS,
		.ifnum = 0,
		.type = QUIRK_MIDI_FIXED_ENDPOINT,
		.data = &quirk_data_1
	};
	static const struct snd_usb_midi_endpoint_info quirk_data_2 = {
		.out_ep = 0x06,
		.in_ep = 0x06,
		.out_cables = 0x003,
		.in_cables = 0x003
	};
	static const struct snd_usb_audio_quirk quirk_2 = {
		.vendor_name = "TASCAM",
		.product_name = "US428",
		.ifnum = 0,
		.type = QUIRK_MIDI_FIXED_ENDPOINT,
		.data = &quirk_data_2
	};
	struct usb_device *dev = usx2y(card)->dev;
	struct usb_interface *iface = usb_ifnum_to_if(dev, 0);
	const struct snd_usb_audio_quirk *quirk =
		le16_to_cpu(dev->descriptor.idProduct) == USB_ID_US428 ?
		&quirk_2 : &quirk_1;

	return snd_usbmidi_create(card, iface, &usx2y(card)->midi_list, quirk);
}

static int usx2y_create_alsa_devices(struct snd_card *card)
{
	int err;

	err = usx2y_create_usbmidi(card);
	if (err < 0) {
		dev_err(card->dev, "%s: usx2y_create_usbmidi error %i\n", __func__, err);
		return err;
	}
	err = usx2y_audio_create(card);
	if (err < 0)
		return err;
	err = usx2y_hwdep_pcm_new(card);
	if (err < 0)
		return err;
	err = snd_card_register(card);
	if (err < 0)
		return err;
	return 0;
}

static int snd_usx2y_hwdep_dsp_load(struct snd_hwdep *hw,
				    struct snd_hwdep_dsp_image *dsp)
{
	struct usx2ydev *priv = hw->private_data;
	struct usb_device *dev = priv->dev;
	int lret, err;
	char *buf;

	buf = memdup_user(dsp->image, dsp->length);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = usb_set_interface(dev, 0, 1);
	if (err)
		dev_err(&dev->dev, "usb_set_interface error\n");
	else
		err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
	kfree(buf);
	if (err)
		return err;
	if (dsp->index == 1) {
		msleep(250);				// give the device some time
		err = usx2y_async_seq04_init(priv);
		if (err) {
			dev_err(&dev->dev, "usx2y_async_seq04_init error\n");
			return err;
		}
		err = usx2y_in04_init(priv);
		if (err) {
			dev_err(&dev->dev, "usx2y_in04_init error\n");
			return err;
		}
		err = usx2y_create_alsa_devices(hw->card);
		if (err) {
			dev_err(&dev->dev, "usx2y_create_alsa_devices error %i\n", err);
			return err;
		}
		priv->chip_status |= USX2Y_STAT_CHIP_INIT;
	}
	return err;
}

int usx2y_hwdep_new(struct snd_card *card, struct usb_device *device)
{
	int err;
	struct snd_hwdep *hw;
	struct usx2ydev *us428 = usx2y(card);

	err = snd_hwdep_new(card, SND_USX2Y_LOADER_ID, 0, &hw);
	if (err < 0)
		return err;

	hw->iface = SNDRV_HWDEP_IFACE_USX2Y;
	hw->private_data = us428;
	hw->ops.dsp_status = snd_usx2y_hwdep_dsp_status;
	hw->ops.dsp_load = snd_usx2y_hwdep_dsp_load;
	hw->ops.mmap = snd_us428ctls_mmap;
	hw->ops.poll = snd_us428ctls_poll;
	hw->exclusive = 1;
	sprintf(hw->name, "/dev/bus/usb/%03d/%03d", device->bus->busnum, device->devnum);

	us428->us428ctls_sharedmem = alloc_pages_exact(US428_SHAREDMEM_PAGES, GFP_KERNEL);
	if (!us428->us428ctls_sharedmem)
		return -ENOMEM;
	memset(us428->us428ctls_sharedmem, -1, US428_SHAREDMEM_PAGES);
	us428->us428ctls_sharedmem->ctl_snapshot_last = -2;
	return 0;
}
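/*
 * Illustrative userspace sketch, not part of the driver above: feeding the
 * two firmware images to snd_usx2y_hwdep_dsp_load() through the standard
 * ALSA hwdep DSP-load ioctl (in practice the usx2yloader tool from
 * alsa-tools does this). The hwdep device node path and the firmware
 * contents are assumptions; image index 0 is the prepad data and index 1
 * the FPGA bitstream, as the dsp_status handler above reports.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/asound.h>	/* kernel UAPI: snd_hwdep_dsp_image */

/* Push one image to the driver; returns the ioctl result. */
static int load_image(int fd, unsigned int index,
		      unsigned char *data, size_t len)
{
	struct snd_hwdep_dsp_image img;

	memset(&img, 0, sizeof(img));
	img.index = index;	/* 0: prepad data, 1: FPGA code */
	img.image = data;
	img.length = len;
	return ioctl(fd, SNDRV_HWDEP_IOCTL_DSP_LOAD, &img);
}

/*
 * Usage would be roughly:
 *	int fd = open("/dev/snd/hwC0D0", O_RDWR);   // the card's hwdep node (example path)
 *	load_image(fd, 0, prepad, prepad_len);
 *	load_image(fd, 1, fpga, fpga_len);
 * after which the driver brings up MIDI, PCM and the hwdep PCM device.
 */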
// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
 * devio.c -- User space communication with USB devices.
 *
 * Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
 *
 * This file implements the usbfs/x/y files, where
 * x is the bus number and y the device number.
 *
 * It allows user space programs/"drivers" to communicate directly
 * with USB devices without intervening kernel driver.
* * Revision history * 22.12.1999 0.1 Initial release (split from proc_usb.c) * 04.01.2000 0.2 Turned into its own filesystem * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery * (CAN-2005-3055) */ /*****************************************************************************/ #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/usb/quirks.h> #include <linux/cdev.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/user_namespace.h> #include <linux/scatterlist.h> #include <linux/uaccess.h> #include <linux/dma-mapping.h> #include <asm/byteorder.h> #include <linux/moduleparam.h> #include "usb.h" #ifdef CONFIG_PM #define MAYBE_CAP_SUSPEND USBDEVFS_CAP_SUSPEND #else #define MAYBE_CAP_SUSPEND 0 #endif #define USB_MAXBUS 64 #define USB_DEVICE_MAX (USB_MAXBUS * 128) #define USB_SG_SIZE 16384 /* split-size for large txs */ /* Mutual exclusion for ps->list in resume vs. release and remove */ static DEFINE_MUTEX(usbfs_mutex); struct usb_dev_state { struct list_head list; /* state list */ struct usb_device *dev; struct file *file; spinlock_t lock; /* protects the async urb lists */ struct list_head async_pending; struct list_head async_completed; struct list_head memory_list; wait_queue_head_t wait; /* wake up if a request completed */ wait_queue_head_t wait_for_resume; /* wake up upon runtime resume */ unsigned int discsignr; struct pid *disc_pid; const struct cred *cred; sigval_t disccontext; unsigned long ifclaimed; u32 disabled_bulk_eps; unsigned long interface_allowed_mask; int not_yet_resumed; bool suspend_allowed; bool privileges_dropped; }; struct usb_memory { struct list_head memlist; int vma_use_count; int urb_use_count; u32 size; void *mem; dma_addr_t dma_handle; unsigned long vm_start; struct usb_dev_state *ps; }; struct async { struct list_head asynclist; struct usb_dev_state *ps; struct pid *pid; const struct cred *cred; unsigned int signr; unsigned int ifnum; void __user *userbuffer; void __user *userurb; sigval_t userurb_sigval; struct urb *urb; struct usb_memory *usbm; unsigned int mem_usage; int status; u8 bulk_addr; u8 bulk_status; }; static bool usbfs_snoop; module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic"); static unsigned usbfs_snoop_max = 65536; module_param(usbfs_snoop_max, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop_max, "maximum number of bytes to print while snooping"); #define snoop(dev, format, arg...) 
\ do { \ if (usbfs_snoop) \ dev_info(dev, format, ## arg); \ } while (0) enum snoop_when { SUBMIT, COMPLETE }; #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) /* Limit on the total amount of memory we can allocate for transfers */ static u32 usbfs_memory_mb = 16; module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); /* Hard limit, necessary to avoid arithmetic overflow */ #define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) static DEFINE_SPINLOCK(usbfs_memory_usage_lock); static u64 usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ static int usbfs_increase_memory_usage(u64 amount) { u64 lim, total_mem; unsigned long flags; int ret; lim = READ_ONCE(usbfs_memory_mb); lim <<= 20; ret = 0; spin_lock_irqsave(&usbfs_memory_usage_lock, flags); total_mem = usbfs_memory_usage + amount; if (lim > 0 && total_mem > lim) ret = -ENOMEM; else usbfs_memory_usage = total_mem; spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags); return ret; } /* Memory for a transfer is being deallocated */ static void usbfs_decrease_memory_usage(u64 amount) { unsigned long flags; spin_lock_irqsave(&usbfs_memory_usage_lock, flags); if (amount > usbfs_memory_usage) usbfs_memory_usage = 0; else usbfs_memory_usage -= amount; spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags); } static int connected(struct usb_dev_state *ps) { return (!list_empty(&ps->list) && ps->dev->state != USB_STATE_NOTATTACHED); } static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) { struct usb_dev_state *ps = usbm->ps; struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); unsigned long flags; spin_lock_irqsave(&ps->lock, flags); --*count; if (usbm->urb_use_count == 0 && usbm->vma_use_count == 0) { list_del(&usbm->memlist); spin_unlock_irqrestore(&ps->lock, flags); hcd_buffer_free_pages(hcd, usbm->size, usbm->mem, usbm->dma_handle); usbfs_decrease_memory_usage( usbm->size + sizeof(struct usb_memory)); kfree(usbm); } else { spin_unlock_irqrestore(&ps->lock, flags); } } static void usbdev_vm_open(struct vm_area_struct *vma) { struct usb_memory *usbm = vma->vm_private_data; unsigned long flags; spin_lock_irqsave(&usbm->ps->lock, flags); ++usbm->vma_use_count; spin_unlock_irqrestore(&usbm->ps->lock, flags); } static void usbdev_vm_close(struct vm_area_struct *vma) { struct usb_memory *usbm = vma->vm_private_data; dec_usb_memory_use_count(usbm, &usbm->vma_use_count); } static const struct vm_operations_struct usbdev_vm_ops = { .open = usbdev_vm_open, .close = usbdev_vm_close }; static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) { struct usb_memory *usbm = NULL; struct usb_dev_state *ps = file->private_data; struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; dma_addr_t dma_handle = DMA_MAPPING_ERROR; int ret; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); if (ret) goto error; usbm = kzalloc(sizeof(struct usb_memory), GFP_KERNEL); if (!usbm) { ret = -ENOMEM; goto error_decrease_mem; } mem = hcd_buffer_alloc_pages(hcd, size, GFP_USER | __GFP_NOWARN, &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; } memset(mem, 0, size); usbm->mem = mem; usbm->dma_handle = dma_handle; usbm->size = size; usbm->ps = ps; usbm->vm_start = vma->vm_start; usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); /* * In DMA-unavailable 
cases, hcd_buffer_alloc_pages allocates * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check * whether we are in such cases, and then use remap_pfn_range (or * dma_mmap_coherent) to map normal (or DMA) pages into the user * space, respectively. */ if (dma_handle == DMA_MAPPING_ERROR) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, size, vma->vm_page_prot) < 0) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } } else { if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } } vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); vma->vm_ops = &usbdev_vm_ops; vma->vm_private_data = usbm; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&usbm->memlist, &ps->memory_list); spin_unlock_irqrestore(&ps->lock, flags); return 0; error_free_usbm: kfree(usbm); error_decrease_mem: usbfs_decrease_memory_usage(size + sizeof(struct usb_memory)); error: return ret; } static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; ssize_t ret = 0; unsigned len; loff_t pos; int i; pos = *ppos; usb_lock_device(dev); if (!connected(ps)) { ret = -ENODEV; goto err; } else if (pos < 0) { ret = -EINVAL; goto err; } if (pos < sizeof(struct usb_device_descriptor)) { /* 18 bytes - fits on the stack */ struct usb_device_descriptor temp_desc; memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor)); le16_to_cpus(&temp_desc.bcdUSB); le16_to_cpus(&temp_desc.idVendor); le16_to_cpus(&temp_desc.idProduct); le16_to_cpus(&temp_desc.bcdDevice); len = sizeof(struct usb_device_descriptor) - pos; if (len > nbytes) len = nbytes; if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) { ret = -EFAULT; goto err; } *ppos += len; buf += len; nbytes -= len; ret += len; } pos = sizeof(struct usb_device_descriptor); for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { struct usb_config_descriptor *config = (struct usb_config_descriptor *)dev->rawdescriptors[i]; unsigned int length = le16_to_cpu(config->wTotalLength); if (*ppos < pos + length) { /* The descriptor may claim to be longer than it * really is. Here is the actual allocated length. 
*/ unsigned alloclen = le16_to_cpu(dev->config[i].desc.wTotalLength); len = length - (*ppos - pos); if (len > nbytes) len = nbytes; /* Simply don't write (skip over) unallocated parts */ if (alloclen > (*ppos - pos)) { alloclen -= (*ppos - pos); if (copy_to_user(buf, dev->rawdescriptors[i] + (*ppos - pos), min(len, alloclen))) { ret = -EFAULT; goto err; } } *ppos += len; buf += len; nbytes -= len; ret += len; } pos += length; } err: usb_unlock_device(dev); return ret; } /* * async list handling */ static struct async *alloc_async(unsigned int numisoframes) { struct async *as; as = kzalloc(sizeof(struct async), GFP_KERNEL); if (!as) return NULL; as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL); if (!as->urb) { kfree(as); return NULL; } return as; } static void free_async(struct async *as) { int i; put_pid(as->pid); if (as->cred) put_cred(as->cred); for (i = 0; i < as->urb->num_sgs; i++) { if (sg_page(&as->urb->sg[i])) kfree(sg_virt(&as->urb->sg[i])); } kfree(as->urb->sg); if (as->usbm == NULL) kfree(as->urb->transfer_buffer); else dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count); kfree(as->urb->setup_packet); usb_free_urb(as->urb); usbfs_decrease_memory_usage(as->mem_usage); kfree(as); } static void async_newpending(struct async *as) { struct usb_dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&as->asynclist, &ps->async_pending); spin_unlock_irqrestore(&ps->lock, flags); } static void async_removepending(struct async *as) { struct usb_dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_del_init(&as->asynclist); spin_unlock_irqrestore(&ps->lock, flags); } static struct async *async_getcompleted(struct usb_dev_state *ps) { unsigned long flags; struct async *as = NULL; spin_lock_irqsave(&ps->lock, flags); if (!list_empty(&ps->async_completed)) { as = list_entry(ps->async_completed.next, struct async, asynclist); list_del_init(&as->asynclist); } spin_unlock_irqrestore(&ps->lock, flags); return as; } static struct async *async_getpending(struct usb_dev_state *ps, void __user *userurb) { struct async *as; list_for_each_entry(as, &ps->async_pending, asynclist) if (as->userurb == userurb) { list_del_init(&as->asynclist); return as; } return NULL; } static void snoop_urb(struct usb_device *udev, void __user *userurb, int pipe, unsigned length, int timeout_or_status, enum snoop_when when, unsigned char *data, unsigned data_len) { static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; static const char *dirs[] = {"out", "in"}; int ep; const char *t, *d; if (!usbfs_snoop) return; ep = usb_pipeendpoint(pipe); t = types[usb_pipetype(pipe)]; d = dirs[!!usb_pipein(pipe)]; if (userurb) { /* Async */ if (when == SUBMIT) dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "length %u\n", userurb, ep, t, d, length); else dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "actual_length %u status %d\n", userurb, ep, t, d, length, timeout_or_status); } else { if (when == SUBMIT) dev_info(&udev->dev, "ep%d %s-%s, length %u, " "timeout %d\n", ep, t, d, length, timeout_or_status); else dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, " "status %d\n", ep, t, d, length, timeout_or_status); } data_len = min(data_len, usbfs_snoop_max); if (data && data_len > 0) { print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, data, data_len, 1); } } static void snoop_urb_data(struct urb *urb, unsigned len) { int i, size; len = min(len, usbfs_snoop_max); if (!usbfs_snoop || len == 0) return; if (urb->num_sgs == 0) { 
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, urb->transfer_buffer, len, 1); return; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, sg_virt(&urb->sg[i]), size, 1); len -= size; } } static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb) { unsigned i, len, size; if (urb->number_of_packets > 0) /* Isochronous */ len = urb->transfer_buffer_length; else /* Non-Isoc */ len = urb->actual_length; if (urb->num_sgs == 0) { if (copy_to_user(userbuffer, urb->transfer_buffer, len)) return -EFAULT; return 0; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; if (copy_to_user(userbuffer, sg_virt(&urb->sg[i]), size)) return -EFAULT; userbuffer += size; len -= size; } return 0; } #define AS_CONTINUATION 1 #define AS_UNLINK 2 static void cancel_bulk_urbs(struct usb_dev_state *ps, unsigned bulk_addr) __releases(ps->lock) __acquires(ps->lock) { struct urb *urb; struct async *as; /* Mark all the pending URBs that match bulk_addr, up to but not * including the first one without AS_CONTINUATION. If such an * URB is encountered then a new transfer has already started so * the endpoint doesn't need to be disabled; otherwise it does. */ list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_addr == bulk_addr) { if (as->bulk_status != AS_CONTINUATION) goto rescan; as->bulk_status = AS_UNLINK; as->bulk_addr = 0; } } ps->disabled_bulk_eps |= (1 << bulk_addr); /* Now carefully unlink all the marked pending URBs */ rescan: list_for_each_entry_reverse(as, &ps->async_pending, asynclist) { if (as->bulk_status == AS_UNLINK) { as->bulk_status = 0; /* Only once */ urb = as->urb; usb_get_urb(urb); spin_unlock(&ps->lock); /* Allow completions */ usb_unlink_urb(urb); usb_put_urb(urb); spin_lock(&ps->lock); goto rescan; } } } static void async_completed(struct urb *urb) { struct async *as = urb->context; struct usb_dev_state *ps = as->ps; struct pid *pid = NULL; const struct cred *cred = NULL; unsigned long flags; sigval_t addr; int signr, errno; spin_lock_irqsave(&ps->lock, flags); list_move_tail(&as->asynclist, &ps->async_completed); as->status = urb->status; signr = as->signr; if (signr) { errno = as->status; addr = as->userurb_sigval; pid = get_pid(as->pid); cred = get_cred(as->cred); } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, as->status, COMPLETE, NULL, 0); if (usb_urb_dir_in(urb)) snoop_urb_data(urb, urb->actual_length); if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && as->status != -ENOENT) cancel_bulk_urbs(ps, as->bulk_addr); wake_up(&ps->wait); spin_unlock_irqrestore(&ps->lock, flags); if (signr) { kill_pid_usb_asyncio(signr, errno, addr, pid, cred); put_pid(pid); put_cred(cred); } } static void destroy_async(struct usb_dev_state *ps, struct list_head *list) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); while (!list_empty(list)) { as = list_last_entry(list, struct async, asynclist); list_del_init(&as->asynclist); urb = as->urb; usb_get_urb(urb); /* drop the spinlock so the completion handler can run */ spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); spin_lock_irqsave(&ps->lock, flags); } spin_unlock_irqrestore(&ps->lock, flags); } static void destroy_async_on_interface(struct usb_dev_state *ps, unsigned int ifnum) { struct list_head *p, *q, hitlist; unsigned long 
flags; INIT_LIST_HEAD(&hitlist); spin_lock_irqsave(&ps->lock, flags); list_for_each_safe(p, q, &ps->async_pending) if (ifnum == list_entry(p, struct async, asynclist)->ifnum) list_move_tail(p, &hitlist); spin_unlock_irqrestore(&ps->lock, flags); destroy_async(ps, &hitlist); } static void destroy_all_async(struct usb_dev_state *ps) { destroy_async(ps, &ps->async_pending); } /* * interface claims are made only at the request of user level code, * which can also release them (explicitly or by closing files). * they're also undone when devices disconnect. */ static int driver_probe(struct usb_interface *intf, const struct usb_device_id *id) { return -ENODEV; } static void driver_disconnect(struct usb_interface *intf) { struct usb_dev_state *ps = usb_get_intfdata(intf); unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber; if (!ps) return; /* NOTE: this relies on usbcore having canceled and completed * all pending I/O requests; 2.6 does that. */ if (likely(ifnum < 8*sizeof(ps->ifclaimed))) clear_bit(ifnum, &ps->ifclaimed); else dev_warn(&intf->dev, "interface number %u out of range\n", ifnum); usb_set_intfdata(intf, NULL); /* force async requests to complete */ destroy_async_on_interface(ps, ifnum); } /* We don't care about suspend/resume of claimed interfaces */ static int driver_suspend(struct usb_interface *intf, pm_message_t msg) { return 0; } static int driver_resume(struct usb_interface *intf) { return 0; } #ifdef CONFIG_PM /* The following routines apply to the entire device, not interfaces */ void usbfs_notify_suspend(struct usb_device *udev) { /* We don't need to handle this */ } void usbfs_notify_resume(struct usb_device *udev) { struct usb_dev_state *ps; /* Protect against simultaneous remove or release */ mutex_lock(&usbfs_mutex); list_for_each_entry(ps, &udev->filelist, list) { WRITE_ONCE(ps->not_yet_resumed, 0); wake_up_all(&ps->wait_for_resume); } mutex_unlock(&usbfs_mutex); } #endif struct usb_driver usbfs_driver = { .name = "usbfs", .probe = driver_probe, .disconnect = driver_disconnect, .suspend = driver_suspend, .resume = driver_resume, .supports_autosuspend = 1, }; static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; /* already claimed */ if (test_bit(ifnum, &ps->ifclaimed)) return 0; if (ps->privileges_dropped && !test_bit(ifnum, &ps->interface_allowed_mask)) return -EACCES; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else { unsigned int old_suppress; /* suppress uevents while claiming interface */ old_suppress = dev_get_uevent_suppress(&intf->dev); dev_set_uevent_suppress(&intf->dev, 1); err = usb_driver_claim_interface(&usbfs_driver, intf, ps); dev_set_uevent_suppress(&intf->dev, old_suppress); } if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; } static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev; struct usb_interface *intf; int err; err = -EINVAL; if (ifnum >= 8*sizeof(ps->ifclaimed)) return err; dev = ps->dev; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) { unsigned int old_suppress; /* suppress uevents while releasing interface */ old_suppress = dev_get_uevent_suppress(&intf->dev); dev_set_uevent_suppress(&intf->dev, 1); usb_driver_release_interface(&usbfs_driver, intf); dev_set_uevent_suppress(&intf->dev, old_suppress); err = 0; } return err; } static int checkintf(struct usb_dev_state 
*ps, unsigned int ifnum) { if (ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; /* if not yet claimed, claim it for the driver */ dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim " "interface %u before use\n", task_pid_nr(current), current->comm, ifnum); return claimintf(ps, ifnum); } static int findintfep(struct usb_device *dev, unsigned int ep) { unsigned int i, j, e; struct usb_interface *intf; struct usb_host_interface *alts; struct usb_endpoint_descriptor *endpt; if (ep & ~(USB_DIR_IN|0xf)) return -EINVAL; if (!dev->actconfig) return -ESRCH; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { intf = dev->actconfig->interface[i]; for (j = 0; j < intf->num_altsetting; j++) { alts = &intf->altsetting[j]; for (e = 0; e < alts->desc.bNumEndpoints; e++) { endpt = &alts->endpoint[e].desc; if (endpt->bEndpointAddress == ep) return alts->desc.bInterfaceNumber; } } } return -ENOENT; } static int check_ctrlrecip(struct usb_dev_state *ps, unsigned int requesttype, unsigned int request, unsigned int index) { int ret = 0; struct usb_host_interface *alt_setting; if (ps->dev->state != USB_STATE_UNAUTHENTICATED && ps->dev->state != USB_STATE_ADDRESS && ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype)) return 0; /* * check for the special corner case 'get_device_id' in the printer * class specification, which we always want to allow as it is used * to query things like ink level, etc. */ if (requesttype == 0xa1 && request == 0) { alt_setting = usb_find_alt_setting(ps->dev->actconfig, index >> 8, index & 0xff); if (alt_setting && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER) return 0; } index &= 0xff; switch (requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: if ((index & ~USB_DIR_IN) == 0) return 0; ret = findintfep(ps->dev, index); if (ret < 0) { /* * Some not fully compliant Win apps seem to get * index wrong and have the endpoint number here * rather than the endpoint address (with the * correct direction). Win does let this through, * so we'll not reject it here but leave it to * the device to not break KVM. But we warn. 
*/ ret = findintfep(ps->dev, index ^ 0x80); if (ret >= 0) dev_info(&ps->dev->dev, "%s: process %i (%s) requesting ep %02x but needs %02x\n", __func__, task_pid_nr(current), current->comm, index, index ^ 0x80); } if (ret >= 0) ret = checkintf(ps, ret); break; case USB_RECIP_INTERFACE: ret = checkintf(ps, index); break; } return ret; } static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev, unsigned char ep) { if (ep & USB_ENDPOINT_DIR_MASK) return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK]; else return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK]; } static int parse_usbdevfs_streams(struct usb_dev_state *ps, struct usbdevfs_streams __user *streams, unsigned int *num_streams_ret, unsigned int *num_eps_ret, struct usb_host_endpoint ***eps_ret, struct usb_interface **intf_ret) { unsigned int i, num_streams, num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf = NULL; unsigned char ep; int ifnum, ret; if (get_user(num_streams, &streams->num_streams) || get_user(num_eps, &streams->num_eps)) return -EFAULT; if (num_eps < 1 || num_eps > USB_MAXENDPOINTS) return -EINVAL; /* The XHCI controller allows max 2 ^ 16 streams */ if (num_streams_ret && (num_streams < 2 || num_streams > 65536)) return -EINVAL; eps = kmalloc_array(num_eps, sizeof(*eps), GFP_KERNEL); if (!eps) return -ENOMEM; for (i = 0; i < num_eps; i++) { if (get_user(ep, &streams->eps[i])) { ret = -EFAULT; goto error; } eps[i] = ep_to_host_endpoint(ps->dev, ep); if (!eps[i]) { ret = -EINVAL; goto error; } /* usb_alloc/free_streams operate on an usb_interface */ ifnum = findintfep(ps->dev, ep); if (ifnum < 0) { ret = ifnum; goto error; } if (i == 0) { ret = checkintf(ps, ifnum); if (ret < 0) goto error; intf = usb_ifnum_to_if(ps->dev, ifnum); } else { /* Verify all eps belong to the same interface */ if (ifnum != intf->altsetting->desc.bInterfaceNumber) { ret = -EINVAL; goto error; } } } if (num_streams_ret) *num_streams_ret = num_streams; *num_eps_ret = num_eps; *eps_ret = eps; *intf_ret = intf; return 0; error: kfree(eps); return ret; } static struct usb_device *usbdev_lookup_by_devt(dev_t devt) { struct device *dev; dev = bus_find_device_by_devt(&usb_bus_type, devt); if (!dev) return NULL; return to_usb_device(dev); } /* * file operations */ static int usbdev_open(struct inode *inode, struct file *file) { struct usb_device *dev = NULL; struct usb_dev_state *ps; int ret; ret = -ENOMEM; ps = kzalloc(sizeof(struct usb_dev_state), GFP_KERNEL); if (!ps) goto out_free_ps; ret = -ENODEV; /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) dev = usbdev_lookup_by_devt(inode->i_rdev); if (!dev) goto out_free_ps; usb_lock_device(dev); if (dev->state == USB_STATE_NOTATTACHED) goto out_unlock_device; ret = usb_autoresume_device(dev); if (ret) goto out_unlock_device; ps->dev = dev; ps->file = file; ps->interface_allowed_mask = 0xFFFFFFFF; /* 32 bits */ spin_lock_init(&ps->lock); INIT_LIST_HEAD(&ps->list); INIT_LIST_HEAD(&ps->async_pending); INIT_LIST_HEAD(&ps->async_completed); INIT_LIST_HEAD(&ps->memory_list); init_waitqueue_head(&ps->wait); init_waitqueue_head(&ps->wait_for_resume); ps->disc_pid = get_pid(task_pid(current)); ps->cred = get_current_cred(); smp_wmb(); /* Can't race with resume; the device is already active */ list_add_tail(&ps->list, &dev->filelist); file->private_data = ps; usb_unlock_device(dev); snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), current->comm); return ret; out_unlock_device: usb_unlock_device(dev); usb_put_dev(dev); out_free_ps: kfree(ps); return ret; } 
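/*
 * Illustrative sketch (an assumption-laden example, not part of the driver):
 * user space reaches usbdev_open() above simply by opening the matching
 * character-device node under /dev/bus/usb; the bus and device numbers below
 * are assumed. The returned descriptor then accepts the USBDEVFS_* ioctls
 * handled later in this file, e.g. querying capabilities:
 *
 *	int fd = open("/dev/bus/usb/001/004", O_RDWR);
 *	__u32 caps = 0;
 *
 *	if (fd >= 0 && ioctl(fd, USBDEVFS_GET_CAPABILITIES, &caps) == 0)
 *		printf("usbfs capabilities: 0x%x\n", caps);
 */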
static int usbdev_release(struct inode *inode, struct file *file) { struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; unsigned int ifnum; struct async *as; usb_lock_device(dev); usb_hub_release_all_ports(dev, ps); /* Protect against simultaneous resume */ mutex_lock(&usbfs_mutex); list_del_init(&ps->list); mutex_unlock(&usbfs_mutex); for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); ifnum++) { if (test_bit(ifnum, &ps->ifclaimed)) releaseintf(ps, ifnum); } destroy_all_async(ps); if (!ps->suspend_allowed) usb_autosuspend_device(dev); usb_unlock_device(dev); usb_put_dev(dev); put_pid(ps->disc_pid); put_cred(ps->cred); as = async_getcompleted(ps); while (as) { free_async(as); as = async_getcompleted(ps); } kfree(ps); return 0; } static void usbfs_blocking_completion(struct urb *urb) { complete((struct completion *) urb->context); } /* * Much like usb_start_wait_urb, but returns status separately from * actual_length and uses a killable wait. */ static int usbfs_start_wait_urb(struct urb *urb, int timeout, unsigned int *actlen) { DECLARE_COMPLETION_ONSTACK(ctx); unsigned long expire; int rc; urb->context = &ctx; urb->complete = usbfs_blocking_completion; *actlen = 0; rc = usb_submit_urb(urb, GFP_KERNEL); if (unlikely(rc)) return rc; expire = (timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT); rc = wait_for_completion_killable_timeout(&ctx, expire); if (rc <= 0) { usb_kill_urb(urb); *actlen = urb->actual_length; if (urb->status != -ENOENT) ; /* Completed before it was killed */ else if (rc < 0) return -EINTR; else return -ETIMEDOUT; } *actlen = urb->actual_length; return urb->status; } static int do_proc_control(struct usb_dev_state *ps, struct usbdevfs_ctrltransfer *ctrl) { struct usb_device *dev = ps->dev; unsigned int tmo; unsigned char *tbuf; unsigned int wLength, actlen; int i, pipe, ret; struct urb *urb = NULL; struct usb_ctrlrequest *dr = NULL; ret = check_ctrlrecip(ps, ctrl->bRequestType, ctrl->bRequest, ctrl->wIndex); if (ret) return ret; wLength = ctrl->wLength; /* To suppress 64k PAGE_SIZE warning */ if (wLength > PAGE_SIZE) return -EINVAL; ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); if (ret) return ret; ret = -ENOMEM; tbuf = (unsigned char *)__get_free_page(GFP_KERNEL); if (!tbuf) goto done; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) goto done; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) goto done; dr->bRequestType = ctrl->bRequestType; dr->bRequest = ctrl->bRequest; dr->wValue = cpu_to_le16(ctrl->wValue); dr->wIndex = cpu_to_le16(ctrl->wIndex); dr->wLength = cpu_to_le16(ctrl->wLength); tmo = ctrl->timeout; snoop(&dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", ctrl->bRequestType, ctrl->bRequest, ctrl->wValue, ctrl->wIndex, ctrl->wLength); if ((ctrl->bRequestType & USB_DIR_IN) && wLength) { pipe = usb_rcvctrlpipe(dev, 0); usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf, wLength, NULL, NULL); snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); /* Linger a bit, prior to the next control message. 
*/ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen); if (!i && actlen) { if (copy_to_user(ctrl->data, tbuf, actlen)) { ret = -EFAULT; goto done; } } } else { if (wLength) { if (copy_from_user(tbuf, ctrl->data, wLength)) { ret = -EFAULT; goto done; } } pipe = usb_sndctrlpipe(dev, 0); usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf, wLength, NULL, NULL); snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, tbuf, wLength); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0); } if (i < 0 && i != -EPIPE) { dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL " "failed cmd %s rqt %u rq %u len %u ret %d\n", current->comm, ctrl->bRequestType, ctrl->bRequest, ctrl->wLength, i); } ret = (i < 0 ? i : actlen); done: kfree(dr); usb_free_urb(urb); free_page((unsigned long) tbuf); usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); return ret; } static int proc_control(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_ctrltransfer ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return do_proc_control(ps, &ctrl); } static int do_proc_bulk(struct usb_dev_state *ps, struct usbdevfs_bulktransfer *bulk) { struct usb_device *dev = ps->dev; unsigned int tmo, len1, len2, pipe; unsigned char *tbuf; int i, ret; struct urb *urb = NULL; struct usb_host_endpoint *ep; ret = findintfep(ps->dev, bulk->ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; len1 = bulk->len; if (len1 >= (INT_MAX - sizeof(struct urb))) return -EINVAL; if (bulk->ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(dev, bulk->ep & 0x7f); else pipe = usb_sndbulkpipe(dev, bulk->ep & 0x7f); ep = usb_pipe_endpoint(dev, pipe); if (!ep || !usb_endpoint_maxp(&ep->desc)) return -EINVAL; ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) return ret; /* * len1 can be almost arbitrarily large. Don't WARN if it's * too big, just fail the request. */ ret = -ENOMEM; tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN); if (!tbuf) goto done; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) goto done; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, dev, pipe, tbuf, len1, NULL, NULL, ep->desc.bInterval); } else { usb_fill_bulk_urb(urb, dev, pipe, tbuf, len1, NULL, NULL); } tmo = bulk->timeout; if (bulk->ep & 0x80) { snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &len2); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2); if (!i && len2) { if (copy_to_user(bulk->data, tbuf, len2)) { ret = -EFAULT; goto done; } } } else { if (len1) { if (copy_from_user(tbuf, bulk->data, len1)) { ret = -EFAULT; goto done; } } snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &len2); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0); } ret = (i < 0 ? 
i : len2); done: usb_free_urb(urb); kfree(tbuf); usbfs_decrease_memory_usage(len1 + sizeof(struct urb)); return ret; } static int proc_bulk(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_bulktransfer bulk; if (copy_from_user(&bulk, arg, sizeof(bulk))) return -EFAULT; return do_proc_bulk(ps, &bulk); } static void check_reset_of_active_ep(struct usb_device *udev, unsigned int epnum, char *ioctl_name) { struct usb_host_endpoint **eps; struct usb_host_endpoint *ep; eps = (epnum & USB_DIR_IN) ? udev->ep_in : udev->ep_out; ep = eps[epnum & 0x0f]; if (ep && !list_empty(&ep->urb_list)) dev_warn(&udev->dev, "Process %d (%s) called USBDEVFS_%s for active endpoint 0x%02x\n", task_pid_nr(current), current->comm, ioctl_name, epnum); } static int proc_resetep(struct usb_dev_state *ps, void __user *arg) { unsigned int ep; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; check_reset_of_active_ep(ps->dev, ep, "RESETEP"); usb_reset_endpoint(ps->dev, ep); return 0; } static int proc_clearhalt(struct usb_dev_state *ps, void __user *arg) { unsigned int ep; int pipe; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT"); if (ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f); else pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f); return usb_clear_halt(ps->dev, pipe); } static int proc_getdriver(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_getdriver gd; struct usb_interface *intf; int ret; if (copy_from_user(&gd, arg, sizeof(gd))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, gd.interface); if (!intf || !intf->dev.driver) ret = -ENODATA; else { strscpy(gd.driver, intf->dev.driver->name, sizeof(gd.driver)); ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0); } return ret; } static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_connectinfo ci; memset(&ci, 0, sizeof(ci)); ci.devnum = ps->dev->devnum; ci.slow = ps->dev->speed == USB_SPEED_LOW; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; } static int proc_conninfo_ex(struct usb_dev_state *ps, void __user *arg, size_t size) { struct usbdevfs_conninfo_ex ci; struct usb_device *udev = ps->dev; if (size < sizeof(ci.size)) return -EINVAL; memset(&ci, 0, sizeof(ci)); ci.size = sizeof(ci); ci.busnum = udev->bus->busnum; ci.devnum = udev->devnum; ci.speed = udev->speed; while (udev && udev->portnum != 0) { if (++ci.num_ports <= ARRAY_SIZE(ci.ports)) ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports] = udev->portnum; udev = udev->parent; } if (ci.num_ports < ARRAY_SIZE(ci.ports)) memmove(&ci.ports[0], &ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports], ci.num_ports); if (copy_to_user(arg, &ci, min(sizeof(ci), size))) return -EFAULT; return 0; } static int proc_resetdevice(struct usb_dev_state *ps) { struct usb_host_config *actconfig = ps->dev->actconfig; struct usb_interface *interface; int i, number; /* Don't allow a device reset if the process has dropped the * privilege to do such things and any of the interfaces are * currently claimed. 
*/ if (ps->privileges_dropped && actconfig) { for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { interface = actconfig->interface[i]; number = interface->cur_altsetting->desc.bInterfaceNumber; if (usb_interface_claimed(interface) && !test_bit(number, &ps->ifclaimed)) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s while '%s' resets device\n", number, interface->dev.driver->name, current->comm); return -EACCES; } } } return usb_reset_device(ps->dev); } static int proc_setintf(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_setinterface setintf; int ret; if (copy_from_user(&setintf, arg, sizeof(setintf))) return -EFAULT; ret = checkintf(ps, setintf.interface); if (ret) return ret; destroy_async_on_interface(ps, setintf.interface); return usb_set_interface(ps->dev, setintf.interface, setintf.altsetting); } static int proc_setconfig(struct usb_dev_state *ps, void __user *arg) { int u; int status = 0; struct usb_host_config *actconfig; if (get_user(u, (int __user *)arg)) return -EFAULT; actconfig = ps->dev->actconfig; /* Don't touch the device if any interfaces are claimed. * It could interfere with other drivers' operations, and if * an interface is claimed by usbfs it could easily deadlock. */ if (actconfig) { int i; for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { if (usb_interface_claimed(actconfig->interface[i])) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s " "while '%s' sets config #%d\n", actconfig->interface[i] ->cur_altsetting ->desc.bInterfaceNumber, actconfig->interface[i] ->dev.driver->name, current->comm, u); status = -EBUSY; break; } } } /* SET_CONFIGURATION is often abused as a "cheap" driver reset, * so avoid usb_set_configuration()'s kick to sysfs */ if (status == 0) { if (actconfig && actconfig->desc.bConfigurationValue == u) status = usb_reset_configuration(ps->dev); else status = usb_set_configuration(ps->dev, u); } return status; } static struct usb_memory * find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb) { struct usb_memory *usbm = NULL, *iter; unsigned long flags; unsigned long uurb_start = (unsigned long)uurb->buffer; spin_lock_irqsave(&ps->lock, flags); list_for_each_entry(iter, &ps->memory_list, memlist) { if (uurb_start >= iter->vm_start && uurb_start < iter->vm_start + iter->size) { if (uurb->buffer_length > iter->vm_start + iter->size - uurb_start) { usbm = ERR_PTR(-EINVAL); } else { usbm = iter; usbm->urb_use_count++; } break; } } spin_unlock_irqrestore(&ps->lock, flags); return usbm; } static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb, struct usbdevfs_iso_packet_desc __user *iso_frame_desc, void __user *arg, sigval_t userurb_sigval) { struct usbdevfs_iso_packet_desc *isopkt = NULL; struct usb_host_endpoint *ep; struct async *as = NULL; struct usb_ctrlrequest *dr = NULL; unsigned int u, totlen, isofrmlen; int i, ret, num_sgs = 0, ifnum = -1; int number_of_packets = 0; unsigned int stream_id = 0; void *buf; bool is_in; bool allow_short = false; bool allow_zero = false; unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT; /* USBDEVFS_URB_ISO_ASAP is a special case */ if (uurb->type == USBDEVFS_URB_TYPE_ISO) mask |= USBDEVFS_URB_ISO_ASAP; if (uurb->flags & ~mask) return -EINVAL; if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; if (!(uurb->type == 
USBDEVFS_URB_TYPE_CONTROL && (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) { ifnum = findintfep(ps->dev, uurb->endpoint); if (ifnum < 0) return ifnum; ret = checkintf(ps, ifnum); if (ret) return ret; } ep = ep_to_host_endpoint(ps->dev, uurb->endpoint); if (!ep) return -ENOENT; is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0; u = 0; switch (uurb->type) { case USBDEVFS_URB_TYPE_CONTROL: if (!usb_endpoint_xfer_control(&ep->desc)) return -EINVAL; /* min 8 byte setup packet */ if (uurb->buffer_length < 8) return -EINVAL; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!dr) return -ENOMEM; if (copy_from_user(dr, uurb->buffer, 8)) { ret = -EFAULT; goto error; } if (uurb->buffer_length < (le16_to_cpu(dr->wLength) + 8)) { ret = -EINVAL; goto error; } ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest, le16_to_cpu(dr->wIndex)); if (ret) goto error; uurb->buffer_length = le16_to_cpu(dr->wLength); uurb->buffer += 8; if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) { is_in = true; uurb->endpoint |= USB_DIR_IN; } else { is_in = false; uurb->endpoint &= ~USB_DIR_IN; } if (is_in) allow_short = true; snoop(&ps->dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", dr->bRequestType, dr->bRequest, __le16_to_cpu(dr->wValue), __le16_to_cpu(dr->wIndex), __le16_to_cpu(dr->wLength)); u = sizeof(struct usb_ctrlrequest); break; case USBDEVFS_URB_TYPE_BULK: if (!is_in) allow_zero = true; else allow_short = true; switch (usb_endpoint_type(&ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: return -EINVAL; case USB_ENDPOINT_XFER_INT: /* allow single-shot interrupt transfers */ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT; goto interrupt_urb; } num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE); if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize) num_sgs = 0; if (ep->streams) stream_id = uurb->stream_id; break; case USBDEVFS_URB_TYPE_INTERRUPT: if (!usb_endpoint_xfer_int(&ep->desc)) return -EINVAL; interrupt_urb: if (!is_in) allow_zero = true; else allow_short = true; break; case USBDEVFS_URB_TYPE_ISO: /* arbitrary limit */ if (uurb->number_of_packets < 1 || uurb->number_of_packets > 128) return -EINVAL; if (!usb_endpoint_xfer_isoc(&ep->desc)) return -EINVAL; number_of_packets = uurb->number_of_packets; isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) * number_of_packets; isopkt = memdup_user(iso_frame_desc, isofrmlen); if (IS_ERR(isopkt)) { ret = PTR_ERR(isopkt); isopkt = NULL; goto error; } for (totlen = u = 0; u < number_of_packets; u++) { /* * arbitrary limit need for USB 3.1 Gen2 * sizemax: 96 DPs at SSP, 96 * 1024 = 98304 */ if (isopkt[u].length > 98304) { ret = -EINVAL; goto error; } totlen += isopkt[u].length; } u *= sizeof(struct usb_iso_packet_descriptor); uurb->buffer_length = totlen; break; default: return -EINVAL; } if (uurb->buffer_length > 0 && !access_ok(uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } as = alloc_async(number_of_packets); if (!as) { ret = -ENOMEM; goto error; } as->usbm = find_memory_area(ps, uurb); if (IS_ERR(as->usbm)) { ret = PTR_ERR(as->usbm); as->usbm = NULL; goto error; } /* do not use SG buffers when memory mapped segments * are in use */ if (as->usbm) num_sgs = 0; u += sizeof(struct async) + sizeof(struct urb) + (as->usbm ? 
0 : uurb->buffer_length) + num_sgs * sizeof(struct scatterlist); ret = usbfs_increase_memory_usage(u); if (ret) goto error; as->mem_usage = u; if (num_sgs) { as->urb->sg = kmalloc_array(num_sgs, sizeof(struct scatterlist), GFP_KERNEL | __GFP_NOWARN); if (!as->urb->sg) { ret = -ENOMEM; goto error; } as->urb->num_sgs = num_sgs; sg_init_table(as->urb->sg, as->urb->num_sgs); totlen = uurb->buffer_length; for (i = 0; i < as->urb->num_sgs; i++) { u = (totlen > USB_SG_SIZE) ? USB_SG_SIZE : totlen; buf = kmalloc(u, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto error; } sg_set_buf(&as->urb->sg[i], buf, u); if (!is_in) { if (copy_from_user(buf, uurb->buffer, u)) { ret = -EFAULT; goto error; } uurb->buffer += u; } totlen -= u; } } else if (uurb->buffer_length > 0) { if (as->usbm) { unsigned long uurb_start = (unsigned long)uurb->buffer; as->urb->transfer_buffer = as->usbm->mem + (uurb_start - as->usbm->vm_start); } else { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL | __GFP_NOWARN); if (!as->urb->transfer_buffer) { ret = -ENOMEM; goto error; } if (!is_in) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } } else if (uurb->type == USBDEVFS_URB_TYPE_ISO) { /* * Isochronous input data may end up being * discontiguous if some of the packets are * short. Clear the buffer so that the gaps * don't leak kernel data to userspace. */ memset(as->urb->transfer_buffer, 0, uurb->buffer_length); } } } as->urb->dev = ps->dev; as->urb->pipe = (uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | (uurb->endpoint & USB_DIR_IN); /* This tedious sequence is necessary because the URB_* flags * are internal to the kernel and subject to change, whereas * the USBDEVFS_URB_* flags are a user API and must not be changed. */ u = (is_in ? 
URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) u |= URB_SHORT_NOT_OK; if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) u |= URB_ZERO_PACKET; if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) u |= URB_NO_INTERRUPT; as->urb->transfer_flags = u; if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); as->urb->transfer_buffer_length = uurb->buffer_length; as->urb->setup_packet = (unsigned char *)dr; dr = NULL; as->urb->start_frame = uurb->start_frame; as->urb->number_of_packets = number_of_packets; as->urb->stream_id = stream_id; if (ep->desc.bInterval) { if (uurb->type == USBDEVFS_URB_TYPE_ISO || ps->dev->speed == USB_SPEED_HIGH || ps->dev->speed >= USB_SPEED_SUPER) as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); else as->urb->interval = ep->desc.bInterval; } as->urb->context = as; as->urb->complete = async_completed; for (totlen = u = 0; u < number_of_packets; u++) { as->urb->iso_frame_desc[u].offset = totlen; as->urb->iso_frame_desc[u].length = isopkt[u].length; totlen += isopkt[u].length; } kfree(isopkt); isopkt = NULL; as->ps = ps; as->userurb = arg; as->userurb_sigval = userurb_sigval; if (as->usbm) { unsigned long uurb_start = (unsigned long)uurb->buffer; as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; as->urb->transfer_dma = as->usbm->dma_handle + (uurb_start - as->usbm->vm_start); } else if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; as->signr = uurb->signr; as->ifnum = ifnum; as->pid = get_pid(task_pid(current)); as->cred = get_current_cred(); snoop_urb(ps->dev, as->userurb, as->urb->pipe, as->urb->transfer_buffer_length, 0, SUBMIT, NULL, 0); if (!is_in) snoop_urb_data(as->urb, as->urb->transfer_buffer_length); async_newpending(as); if (usb_endpoint_xfer_bulk(&ep->desc)) { spin_lock_irq(&ps->lock); /* Not exactly the endpoint address; the direction bit is * shifted to the 0x10 position so that the value will be * between 0 and 31. */ as->bulk_addr = usb_endpoint_num(&ep->desc) | ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) >> 3); /* If this bulk URB is the start of a new transfer, re-enable * the endpoint. Otherwise mark it as a continuation URB. */ if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION) as->bulk_status = AS_CONTINUATION; else ps->disabled_bulk_eps &= ~(1 << as->bulk_addr); /* Don't accept continuation URBs if the endpoint is * disabled because of an earlier error. 
*/ if (ps->disabled_bulk_eps & (1 << as->bulk_addr)) ret = -EREMOTEIO; else ret = usb_submit_urb(as->urb, GFP_ATOMIC); spin_unlock_irq(&ps->lock); } else { ret = usb_submit_urb(as->urb, GFP_KERNEL); } if (ret) { dev_printk(KERN_DEBUG, &ps->dev->dev, "usbfs: usb_submit_urb returned %d\n", ret); snoop_urb(ps->dev, as->userurb, as->urb->pipe, 0, ret, COMPLETE, NULL, 0); async_removepending(as); goto error; } return 0; error: kfree(isopkt); kfree(dr); if (as) free_async(as); return ret; } static int proc_submiturb(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; sigval_t userurb_sigval; if (copy_from_user(&uurb, arg, sizeof(uurb))) return -EFAULT; memset(&userurb_sigval, 0, sizeof(userurb_sigval)); userurb_sigval.sival_ptr = arg; return proc_do_submiturb(ps, &uurb, (((struct usbdevfs_urb __user *)arg)->iso_frame_desc), arg, userurb_sigval); } static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); as = async_getpending(ps, arg); if (!as) { spin_unlock_irqrestore(&ps->lock, flags); return -EINVAL; } urb = as->urb; usb_get_urb(urb); spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); return 0; } static void compute_isochronous_actual_length(struct urb *urb) { unsigned int i; if (urb->number_of_packets > 0) { urb->actual_length = 0; for (i = 0; i < urb->number_of_packets; i++) urb->actual_length += urb->iso_frame_desc[i].actual_length; } } static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; } if (put_user(as->status, &userurb->status)) goto err_out; if (put_user(urb->actual_length, &userurb->actual_length)) goto err_out; if (put_user(urb->error_count, &userurb->error_count)) goto err_out; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) goto err_out; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) goto err_out; } } if (put_user(addr, (void __user * __user *)arg)) return -EFAULT; return 0; err_out: return -EFAULT; } static struct async *reap_as(struct usb_dev_state *ps) { DECLARE_WAITQUEUE(wait, current); struct async *as = NULL; struct usb_device *dev = ps->dev; add_wait_queue(&ps->wait, &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); as = async_getcompleted(ps); if (as || !connected(ps)) break; if (signal_pending(current)) break; usb_unlock_device(dev); schedule(); usb_lock_device(dev); } remove_wait_queue(&ps->wait, &wait); set_current_state(TASK_RUNNING); return as; } static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval; snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -ENODEV; } static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); if (as) { snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); } else { retval = 
(connected(ps) ? -EAGAIN : -ENODEV); } return retval; } #ifdef CONFIG_COMPAT static int proc_control_compat(struct usb_dev_state *ps, struct usbdevfs_ctrltransfer32 __user *p32) { struct usbdevfs_ctrltransfer ctrl; u32 udata; if (copy_from_user(&ctrl, p32, sizeof(*p32) - sizeof(compat_caddr_t)) || get_user(udata, &p32->data)) return -EFAULT; ctrl.data = compat_ptr(udata); return do_proc_control(ps, &ctrl); } static int proc_bulk_compat(struct usb_dev_state *ps, struct usbdevfs_bulktransfer32 __user *p32) { struct usbdevfs_bulktransfer bulk; compat_caddr_t addr; if (get_user(bulk.ep, &p32->ep) || get_user(bulk.len, &p32->len) || get_user(bulk.timeout, &p32->timeout) || get_user(addr, &p32->data)) return -EFAULT; bulk.data = compat_ptr(addr); return do_proc_bulk(ps, &bulk); } static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal32 ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext.sival_int = ds.context; return 0; } static int get_urb32(struct usbdevfs_urb *kurb, struct usbdevfs_urb32 __user *uurb) { struct usbdevfs_urb32 urb32; if (copy_from_user(&urb32, uurb, sizeof(*uurb))) return -EFAULT; kurb->type = urb32.type; kurb->endpoint = urb32.endpoint; kurb->status = urb32.status; kurb->flags = urb32.flags; kurb->buffer = compat_ptr(urb32.buffer); kurb->buffer_length = urb32.buffer_length; kurb->actual_length = urb32.actual_length; kurb->start_frame = urb32.start_frame; kurb->number_of_packets = urb32.number_of_packets; kurb->error_count = urb32.error_count; kurb->signr = urb32.signr; kurb->usercontext = compat_ptr(urb32.usercontext); return 0; } static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; sigval_t userurb_sigval; if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg)) return -EFAULT; memset(&userurb_sigval, 0, sizeof(userurb_sigval)); userurb_sigval.sival_int = ptr_to_compat(arg); return proc_do_submiturb(ps, &uurb, ((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc, arg, userurb_sigval); } static int processcompl_compat(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb32 __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; } if (put_user(as->status, &userurb->status)) return -EFAULT; if (put_user(urb->actual_length, &userurb->actual_length)) return -EFAULT; if (put_user(urb->error_count, &userurb->error_count)) return -EFAULT; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) return -EFAULT; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) return -EFAULT; } } if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) return -EFAULT; return 0; } static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval; snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -ENODEV; } static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); if (as) { 
snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); } else { retval = (connected(ps) ? -EAGAIN : -ENODEV); } return retval; } #endif static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext.sival_ptr = ds.context; return 0; } static int proc_claiminterface(struct usb_dev_state *ps, void __user *arg) { unsigned int ifnum; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; return claimintf(ps, ifnum); } static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg) { unsigned int ifnum; int ret; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; ret = releaseintf(ps, ifnum); if (ret < 0) return ret; destroy_async_on_interface(ps, ifnum); return 0; } static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl) { int size; void *buf = NULL; int retval = 0; struct usb_interface *intf = NULL; struct usb_driver *driver = NULL; if (ps->privileges_dropped) return -EACCES; if (!connected(ps)) return -ENODEV; /* alloc buffer */ size = _IOC_SIZE(ctl->ioctl_code); if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) { if (copy_from_user(buf, ctl->data, size)) { kfree(buf); return -EFAULT; } } else { memset(buf, 0, size); } } if (ps->dev->state != USB_STATE_CONFIGURED) retval = -EHOSTUNREACH; else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno))) retval = -EINVAL; else switch (ctl->ioctl_code) { /* disconnect kernel driver from interface */ case USBDEVFS_DISCONNECT: if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } else retval = -ENODATA; break; /* let kernel drivers try to (re)bind to the interface */ case USBDEVFS_CONNECT: if (!intf->dev.driver) retval = device_attach(&intf->dev); else retval = -EBUSY; break; /* talk directly to the interface's driver */ default: if (intf->dev.driver) driver = to_usb_driver(intf->dev.driver); if (driver == NULL || driver->unlocked_ioctl == NULL) { retval = -ENOTTY; } else { retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); if (retval == -ENOIOCTLCMD) retval = -ENOTTY; } } /* cleanup and return */ if (retval >= 0 && (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0 && size > 0 && copy_to_user(ctl->data, buf, size) != 0) retval = -EFAULT; kfree(buf); return retval; } static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_ioctl ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return proc_ioctl(ps, &ctrl); } #ifdef CONFIG_COMPAT static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg) { struct usbdevfs_ioctl32 ioc32; struct usbdevfs_ioctl ctrl; if (copy_from_user(&ioc32, compat_ptr(arg), sizeof(ioc32))) return -EFAULT; ctrl.ifno = ioc32.ifno; ctrl.ioctl_code = ioc32.ioctl_code; ctrl.data = compat_ptr(ioc32.data); return proc_ioctl(ps, &ctrl); } #endif static int proc_claim_port(struct usb_dev_state *ps, void __user *arg) { unsigned portnum; int rc; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; rc = usb_hub_claim_port(ps->dev, portnum, ps); if (rc == 0) snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n", portnum, task_pid_nr(current), current->comm); return rc; } static int proc_release_port(struct 
usb_dev_state *ps, void __user *arg) { unsigned portnum; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; return usb_hub_release_port(ps->dev, portnum, ps); } static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg) { __u32 caps; caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM | USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP | USBDEVFS_CAP_DROP_PRIVILEGES | USBDEVFS_CAP_CONNINFO_EX | MAYBE_CAP_SUSPEND; if (!ps->dev->bus->no_stop_on_short) caps |= USBDEVFS_CAP_BULK_CONTINUATION; if (ps->dev->bus->sg_tablesize) caps |= USBDEVFS_CAP_BULK_SCATTER_GATHER; if (put_user(caps, (__u32 __user *)arg)) return -EFAULT; return 0; } static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnect_claim dc; struct usb_interface *intf; if (copy_from_user(&dc, arg, sizeof(dc))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, dc.interface); if (!intf) return -EINVAL; if (intf->dev.driver) { struct usb_driver *driver = to_usb_driver(intf->dev.driver); if (ps->privileges_dropped) return -EACCES; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) != 0) return -EBUSY; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) == 0) return -EBUSY; dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } return claimintf(ps, dc.interface); } static int proc_alloc_streams(struct usb_dev_state *ps, void __user *arg) { unsigned num_streams, num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf; int r; r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps, &eps, &intf); if (r) return r; destroy_async_on_interface(ps, intf->altsetting[0].desc.bInterfaceNumber); r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL); kfree(eps); return r; } static int proc_free_streams(struct usb_dev_state *ps, void __user *arg) { unsigned num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf; int r; r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf); if (r) return r; destroy_async_on_interface(ps, intf->altsetting[0].desc.bInterfaceNumber); r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL); kfree(eps); return r; } static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg) { u32 data; if (copy_from_user(&data, arg, sizeof(data))) return -EFAULT; /* This is a one way operation. Once privileges are * dropped, you cannot regain them. You may however reissue * this ioctl to shrink the allowed interfaces mask. 
*/ ps->interface_allowed_mask &= data; ps->privileges_dropped = true; return 0; } static int proc_forbid_suspend(struct usb_dev_state *ps) { int ret = 0; if (ps->suspend_allowed) { ret = usb_autoresume_device(ps->dev); if (ret == 0) ps->suspend_allowed = false; else if (ret != -ENODEV) ret = -EIO; } return ret; } static int proc_allow_suspend(struct usb_dev_state *ps) { if (!connected(ps)) return -ENODEV; WRITE_ONCE(ps->not_yet_resumed, 1); if (!ps->suspend_allowed) { usb_autosuspend_device(ps->dev); ps->suspend_allowed = true; } return 0; } static int proc_wait_for_resume(struct usb_dev_state *ps) { int ret; usb_unlock_device(ps->dev); ret = wait_event_interruptible(ps->wait_for_resume, READ_ONCE(ps->not_yet_resumed) == 0); usb_lock_device(ps->dev); if (ret != 0) return -EINTR; return proc_forbid_suspend(ps); } /* * NOTE: All requests here that have interface numbers as parameters * are assuming that somehow the configuration has been prevented from * changing. But there's no mechanism to ensure that... */ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, void __user *p) { struct usb_dev_state *ps = file->private_data; struct inode *inode = file_inode(file); struct usb_device *dev = ps->dev; int ret = -ENOTTY; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; usb_lock_device(dev); /* Reap operations are allowed even after disconnection */ switch (cmd) { case USBDEVFS_REAPURB: snoop(&dev->dev, "%s: REAPURB\n", __func__); ret = proc_reapurb(ps, p); goto done; case USBDEVFS_REAPURBNDELAY: snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__); ret = proc_reapurbnonblock(ps, p); goto done; #ifdef CONFIG_COMPAT case USBDEVFS_REAPURB32: snoop(&dev->dev, "%s: REAPURB32\n", __func__); ret = proc_reapurb_compat(ps, p); goto done; case USBDEVFS_REAPURBNDELAY32: snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__); ret = proc_reapurbnonblock_compat(ps, p); goto done; #endif } if (!connected(ps)) { usb_unlock_device(dev); return -ENODEV; } switch (cmd) { case USBDEVFS_CONTROL: snoop(&dev->dev, "%s: CONTROL\n", __func__); ret = proc_control(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_BULK: snoop(&dev->dev, "%s: BULK\n", __func__); ret = proc_bulk(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_RESETEP: snoop(&dev->dev, "%s: RESETEP\n", __func__); ret = proc_resetep(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_RESET: snoop(&dev->dev, "%s: RESET\n", __func__); ret = proc_resetdevice(ps); break; case USBDEVFS_CLEAR_HALT: snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__); ret = proc_clearhalt(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_GETDRIVER: snoop(&dev->dev, "%s: GETDRIVER\n", __func__); ret = proc_getdriver(ps, p); break; case USBDEVFS_CONNECTINFO: snoop(&dev->dev, "%s: CONNECTINFO\n", __func__); ret = proc_connectinfo(ps, p); break; case USBDEVFS_SETINTERFACE: snoop(&dev->dev, "%s: SETINTERFACE\n", __func__); ret = proc_setintf(ps, p); break; case USBDEVFS_SETCONFIGURATION: snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__); ret = proc_setconfig(ps, p); break; case USBDEVFS_SUBMITURB: snoop(&dev->dev, "%s: SUBMITURB\n", __func__); ret = proc_submiturb(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; #ifdef CONFIG_COMPAT case USBDEVFS_CONTROL32: snoop(&dev->dev, "%s: CONTROL32\n", __func__); ret = 
proc_control_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_BULK32: snoop(&dev->dev, "%s: BULK32\n", __func__); ret = proc_bulk_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_DISCSIGNAL32: snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__); ret = proc_disconnectsignal_compat(ps, p); break; case USBDEVFS_SUBMITURB32: snoop(&dev->dev, "%s: SUBMITURB32\n", __func__); ret = proc_submiturb_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_IOCTL32: snoop(&dev->dev, "%s: IOCTL32\n", __func__); ret = proc_ioctl_compat(ps, ptr_to_compat(p)); break; #endif case USBDEVFS_DISCARDURB: snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p); ret = proc_unlinkurb(ps, p); break; case USBDEVFS_DISCSIGNAL: snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__); ret = proc_disconnectsignal(ps, p); break; case USBDEVFS_CLAIMINTERFACE: snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__); ret = proc_claiminterface(ps, p); break; case USBDEVFS_RELEASEINTERFACE: snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__); ret = proc_releaseinterface(ps, p); break; case USBDEVFS_IOCTL: snoop(&dev->dev, "%s: IOCTL\n", __func__); ret = proc_ioctl_default(ps, p); break; case USBDEVFS_CLAIM_PORT: snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__); ret = proc_claim_port(ps, p); break; case USBDEVFS_RELEASE_PORT: snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__); ret = proc_release_port(ps, p); break; case USBDEVFS_GET_CAPABILITIES: ret = proc_get_capabilities(ps, p); break; case USBDEVFS_DISCONNECT_CLAIM: ret = proc_disconnect_claim(ps, p); break; case USBDEVFS_ALLOC_STREAMS: ret = proc_alloc_streams(ps, p); break; case USBDEVFS_FREE_STREAMS: ret = proc_free_streams(ps, p); break; case USBDEVFS_DROP_PRIVILEGES: ret = proc_drop_privileges(ps, p); break; case USBDEVFS_GET_SPEED: ret = ps->dev->speed; break; case USBDEVFS_FORBID_SUSPEND: ret = proc_forbid_suspend(ps); break; case USBDEVFS_ALLOW_SUSPEND: ret = proc_allow_suspend(ps); break; case USBDEVFS_WAIT_FOR_RESUME: ret = proc_wait_for_resume(ps); break; } /* Handle variable-length commands */ switch (cmd & ~IOCSIZE_MASK) { case USBDEVFS_CONNINFO_EX(0): ret = proc_conninfo_ex(ps, p, _IOC_SIZE(cmd)); break; } done: usb_unlock_device(dev); if (ret >= 0) inode_set_atime_to_ts(inode, current_time(inode)); return ret; } static long usbdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); return ret; } /* No kernel lock - fine */ static __poll_t usbdev_poll(struct file *file, struct poll_table_struct *wait) { struct usb_dev_state *ps = file->private_data; __poll_t mask = 0; poll_wait(file, &ps->wait, wait); if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= EPOLLOUT | EPOLLWRNORM; if (!connected(ps)) mask |= EPOLLHUP; if (list_empty(&ps->list)) mask |= EPOLLERR; return mask; } const struct file_operations usbdev_file_operations = { .owner = THIS_MODULE, .llseek = no_seek_end_llseek, .read = usbdev_read, .poll = usbdev_poll, .unlocked_ioctl = usbdev_ioctl, .compat_ioctl = compat_ptr_ioctl, .mmap = usbdev_mmap, .open = usbdev_open, .release = usbdev_release, }; static void usbdev_remove(struct usb_device *udev) { struct usb_dev_state *ps; /* Protect against simultaneous resume */ mutex_lock(&usbfs_mutex); while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct usb_dev_state, list); 
destroy_all_async(ps); wake_up_all(&ps->wait); WRITE_ONCE(ps->not_yet_resumed, 0); wake_up_all(&ps->wait_for_resume); list_del_init(&ps->list); if (ps->discsignr) kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext, ps->disc_pid, ps->cred); } mutex_unlock(&usbfs_mutex); } static int usbdev_notify(struct notifier_block *self, unsigned long action, void *dev) { switch (action) { case USB_DEVICE_ADD: break; case USB_DEVICE_REMOVE: usbdev_remove(dev); break; } return NOTIFY_OK; } static struct notifier_block usbdev_nb = { .notifier_call = usbdev_notify, }; static struct cdev usb_device_cdev; int __init usb_devio_init(void) { int retval; retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX, "usb_device"); if (retval) { printk(KERN_ERR "Unable to register minors for usb_device\n"); goto out; } cdev_init(&usb_device_cdev, &usbdev_file_operations); retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX); if (retval) { printk(KERN_ERR "Unable to get usb_device major %d\n", USB_DEVICE_MAJOR); goto error_cdev; } usb_register_notify(&usbdev_nb); out: return retval; error_cdev: unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); goto out; } void usb_devio_cleanup(void) { usb_unregister_notify(&usbdev_nb); cdev_del(&usb_device_cdev); unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); }
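For context, the character-device interface implemented above is driven entirely from user space through ioctl() calls on the /dev/bus/usb node. The sketch below is illustrative only: the device path, interface number 0 and bulk IN endpoint 0x81 are assumptions about a hypothetical device, and error handling is reduced to early returns. It exercises the synchronous paths handled by do_proc_control(), claimintf(), do_proc_bulk() and releaseintf() above.

/* Hypothetical user-space client for the usbfs interface implemented above.
 * Device path, interface number and endpoint address are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	unsigned char desc[18], data[512];
	unsigned int ifnum = 0;				/* assumed interface */
	int fd, ret;

	fd = open("/dev/bus/usb/001/004", O_RDWR);	/* assumed bus/devnum */
	if (fd < 0)
		return 1;

	/* GET_DESCRIPTOR(device): a device-recipient control transfer,
	 * handled by do_proc_control(); returns bytes transferred. */
	struct usbdevfs_ctrltransfer ctrl = {
		.bRequestType = 0x80,	/* IN | standard | device */
		.bRequest = 0x06,	/* GET_DESCRIPTOR */
		.wValue = 0x0100,	/* device descriptor */
		.wIndex = 0,
		.wLength = sizeof(desc),
		.timeout = 1000,	/* ms */
		.data = desc,
	};
	ret = ioctl(fd, USBDEVFS_CONTROL, &ctrl);
	if (ret >= 0)
		printf("got %d descriptor bytes\n", ret);

	/* Claim the interface before touching its endpoints (claimintf()) */
	if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum) == 0) {
		/* Synchronous bulk IN read on an assumed endpoint 0x81
		 * (do_proc_bulk()); returns bytes transferred or -1. */
		struct usbdevfs_bulktransfer bulk = {
			.ep = 0x81,
			.len = sizeof(data),
			.timeout = 1000,
			.data = data,
		};
		ret = ioctl(fd, USBDEVFS_BULK, &bulk);
		if (ret >= 0)
			printf("bulk read %d bytes\n", ret);

		ioctl(fd, USBDEVFS_RELEASEINTERFACE, &ifnum);
	}

	close(fd);
	return 0;
}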
2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 
// SPDX-License-Identifier: GPL-2.0+
//
// em28xx-cards.c - driver for Empia EM2800/EM2820/2840 USB
//                  video capture devices
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
//                    Markus Rechberger <mrechberger@gmail.com>
//                    Mauro Carvalho Chehab <mchehab@kernel.org>
//                    Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>

#include "em28xx.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/usb.h>
#include <media/tuner.h>
#include <media/drv-intf/msp3400.h>
#include <media/i2c/saa7115.h>
#include <dt-bindings/media/tvp5150.h>
#include <media/i2c/tvaudio.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
#include <sound/ac97_codec.h>

#define DRIVER_NAME "em28xx"

static int tuner = -1;
module_param(tuner, int, 0444);
MODULE_PARM_DESC(tuner, "tuner type");

static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "disable infrared remote support");

static unsigned int disable_usb_speed_check;
module_param(disable_usb_speed_check, int, 0444);
MODULE_PARM_DESC(disable_usb_speed_check,
		 "override min bandwidth requirement of 480M bps");

static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

static int usb_xfer_mode = -1;
module_param(usb_xfer_mode, int, 0444);
MODULE_PARM_DESC(usb_xfer_mode,
		 "USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)");

/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
static DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);

struct em28xx_hash_table {
	unsigned long hash;
	unsigned int model;
	unsigned int tuner;
};

static void em28xx_pre_card_setup(struct em28xx *dev);

/*
 * Reset sequences for analog/digital modes
 */

/* Reset for the most [analog] boards */
static const struct em28xx_reg_seq default_analog[] = {
	{EM2820_R08_GPIO_CTRL,	0x6d,	~EM_GPIO_4,	10},
	{	-1,		-1,	-1,		-1},
};

/* Reset for the most [digital] boards */
static const struct em28xx_reg_seq default_digital[] = {
	{EM2820_R08_GPIO_CTRL,	0x6e,	~EM_GPIO_4,	10},
	{	-1,		-1,	-1,		-1},
};

/* Board :Zolid Hybrid Tv Stick */
static struct em28xx_reg_seq zolid_tuner[] = {
	{EM2820_R08_GPIO_CTRL,	0xfd,	0xff,	100},
	{EM2820_R08_GPIO_CTRL,	0xfe,	0xff,	100},
	{	-1,		-1,	-1,	-1},
};

static struct em28xx_reg_seq zolid_digital[] = {
	{EM2820_R08_GPIO_CTRL,	0x6a,	0xff,	100},
	{EM2820_R08_GPIO_CTRL,	0x7a,	0xff,	100},
	{EM2880_R04_GPO,	0x04,	0xff,	100},
	{EM2880_R04_GPO,	0x0c,	0xff,	100},
	{	-1,		-1,	-1,	-1},
};

/* Board Hauppauge WinTV HVR 900 analog */
static const struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = {
	{EM2820_R08_GPIO_CTRL,	0x2d,	~EM_GPIO_4,	10},
	{	0x05,		0xff,	0x10,		10},
	{	-1,		-1,	-1,		-1},
};

/* Board Hauppauge WinTV HVR 900 digital */
static const struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = {
	{EM2820_R08_GPIO_CTRL,	0x2e,	~EM_GPIO_4,	10},
	{EM2880_R04_GPO,	0x04,	0x0f,		10},
	{EM2880_R04_GPO,	0x0c,	0x0f,		10},
	{	-1,		-1,	-1,		-1},
};

/* Board Hauppauge WinTV HVR 900 (R2) digital */
static const struct em28xx_reg_seq hauppauge_wintv_hvr_900R2_digital[] = {
	{EM2820_R08_GPIO_CTRL,	0x2e,	~EM_GPIO_4,	10},
	{EM2880_R04_GPO,	0x0c,	0x0f,		10},
	{	-1,		-1,	-1,		-1},
};

/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
static const struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
	{EM2820_R08_GPIO_CTRL,	0x69,	~EM_GPIO_4,	10},
	{	-1,		-1,	-1,		-1},
};

/* Board - EM2882 Kworld 315U digital */
static const struct em28xx_reg_seq em2882_kworld_315u_digital[] = {
	{EM2820_R08_GPIO_CTRL,	0xff,	0xff,	10},
	{EM2820_R08_GPIO_CTRL,	0xfe,	0xff,	10},
	{EM2880_R04_GPO,	0x04,	0xff,	10},
	{EM2880_R04_GPO,	0x0c,	0xff,	10},
	{EM2820_R08_GPIO_CTRL,	0x7e,	0xff,	10},
	{	-1,		-1,	-1,	-1},
};

static const struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
	{EM2880_R04_GPO,	0x08,	0xff,	10},
	{EM2880_R04_GPO,	0x0c,	0xff,	10},
	{EM2880_R04_GPO,	0x08,	0xff,	10},
	{EM2880_R04_GPO,	0x0c,	0xff,	10},
	{	-1,		-1,	-1,	-1},
};

static const struct em28xx_reg_seq kworld_330u_analog[] = {
	{EM2820_R08_GPIO_CTRL,	0x6d,	~EM_GPIO_4,	10},
	{EM2880_R04_GPO,	0x00,	0xff,		10},
	{	-1,		-1,	-1,		-1},
};

static const struct em28xx_reg_seq kworld_330u_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, { -1, -1, -1, -1}, }; /* * Evga inDtube * GPIO0 - Enable digital power (s5h1409) - low to enable * GPIO1 - Enable analog power (tvp5150/emp202) - low to enable * GPIO4 - xc3028 reset * GOP3 - s5h1409 reset */ static const struct em28xx_reg_seq evga_indtube_analog[] = { {EM2820_R08_GPIO_CTRL, 0x79, 0xff, 60}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq evga_indtube_digital[] = { {EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 1}, {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 1}, { -1, -1, -1, -1}, }; /* * KWorld PlusTV 340U, UB435-Q and UB435-Q V2 (ATSC) GPIOs map: * EM_GPIO_0 - currently unknown * EM_GPIO_1 - LED disable/enable (1 = off, 0 = on) * EM_GPIO_2 - currently unknown * EM_GPIO_3 - currently unknown * EM_GPIO_4 - TDA18271HD/C1 tuner (1 = active, 0 = in reset) * EM_GPIO_5 - LGDT3304 ATSC/QAM demod (1 = active, 0 = in reset) * EM_GPIO_6 - currently unknown * EM_GPIO_7 - currently unknown */ static const struct em28xx_reg_seq kworld_a340_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq kworld_ub435q_v3_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100}, { -1, -1, -1, -1}, }; /* Pinnacle Hybrid Pro eb1a:2881 */ static const struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { {EM2820_R08_GPIO_CTRL, 0xfd, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x04, 0xff, 100},/* zl10353 reset */ {EM2880_R04_GPO, 0x0c, 0xff, 1}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, { -1, -1, -1, -1}, }; /* * PCTV HD Mini (80e) GPIOs * 0-5: not used * 6: demod reset, active low * 7: LED on, active high */ static const struct em28xx_reg_seq em2874_pctv_80e_digital[] = { {EM28XX_R06_I2C_CLK, 0x45, 0xff, 10}, /*400 KHz*/ {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 100},/*Demod reset*/ {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 10}, { -1, -1, -1, -1}, }; /* * eb1a:2868 Reddo DVB-C USB TV Box * GPIO4 - CU1216L NIM * Other GPIOs seems to be don't care. 
*/ static const struct em28xx_reg_seq reddo_dvb_c_usb_box[] = { {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xde, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x7f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x6f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, { -1, -1, -1, -1}, }; /* Callback for the most boards */ static const struct em28xx_reg_seq default_tuner_gpio[] = { {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, 0, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; /* Mute/unmute */ static const struct em28xx_reg_seq compro_unmute_tv_gpio[] = { {EM2820_R08_GPIO_CTRL, 5, 7, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq compro_unmute_svid_gpio[] = { {EM2820_R08_GPIO_CTRL, 4, 7, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq compro_mute_gpio[] = { {EM2820_R08_GPIO_CTRL, 6, 7, 10}, { -1, -1, -1, -1}, }; /* Terratec AV350 */ static const struct em28xx_reg_seq terratec_av350_mute_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0x7f, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_av350_unmute_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq silvercrest_reg_seq[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x01, 0xf7, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq vc211a_enable[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0x07, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0x0f, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0x0b, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq dikom_dk300_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, { -1, -1, -1, -1}, }; /* Reset for the most [digital] boards */ static const struct em28xx_reg_seq leadership_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq leadership_reset[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, { -1, -1, -1, -1}, }; /* * 2013:024f PCTV nanoStick T2 290e * GPIO_6 - demod reset * GPIO_7 - LED */ static const struct em28xx_reg_seq pctv_290e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 80}, {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 80}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0xc0, 0xff, 80}, /* GPIO_7 = 1 */ { -1, -1, -1, -1}, }; #if 0 static const struct em28xx_reg_seq terratec_h5_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_h5_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, { -1, -1, -1, -1}, }; #endif /* * 2013:024f PCTV DVB-S2 Stick 460e * GPIO_0 - POWER_ON * GPIO_1 - BOOST * GPIO_2 - VUV_LNB (red LED) * GPIO_3 - EXT_12V * GPIO_4 - INT_DEM (DEMOD GPIO_0) * GPIO_5 - INT_LNB * GPIO_6 - RESET_DEM * GPIO_7 - LED (green LED) */ static const struct em28xx_reg_seq pctv_460e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50}, { 0x0d, 0xff, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */ { 0x0d, 0x42, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */ { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq c3tech_digital_duo_digital[] = { 
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10}, /* xc5000 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xf9, 0xff, 35}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 20}, { -1, -1, -1, -1}, }; /* * 2013:0258 PCTV DVB-S2 Stick (461e) * GPIO 0 = POWER_ON * GPIO 1 = BOOST * GPIO 2 = VUV_LNB (red LED) * GPIO 3 = #EXT_12V * GPIO 4 = INT_DEM * GPIO 5 = INT_LNB * GPIO 6 = #RESET_DEM * GPIO 7 = P07_LED (green LED) */ static const struct em28xx_reg_seq pctv_461e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 0}, {0x0d, 0xff, 0xff, 0}, {EM2874_R80_GPIO_P0_CTRL, 0x3f, 0xff, 100}, /* reset demod */ {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 200}, /* reset demod */ {0x0d, 0x42, 0xff, 0}, {EM2874_R80_GPIO_P0_CTRL, 0xeb, 0xff, 0}, {EM2874_R5F_TS_ENABLE, 0x84, 0x84, 0}, /* parallel? | null discard */ { -1, -1, -1, -1}, }; #if 0 static const struct em28xx_reg_seq hauppauge_930c_gpio[] = { {EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, /* xc5000 reset */ {EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq hauppauge_930c_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, { -1, -1, -1, -1}, }; #endif /* * 1b80:e425 MaxMedia UB425-TC * 1b80:e1cc Delock 61959 * GPIO_6 - demod reset, 0=active * GPIO_7 - LED, 0=active */ static const struct em28xx_reg_seq maxmedia_ub425_tc[] = { {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */ { -1, -1, -1, -1}, }; /* * 2304:0242 PCTV QuatroStick (510e) * GPIO_2: decoder reset, 0=active * GPIO_4: decoder suspend, 0=active * GPIO_6: demod reset, 0=active * GPIO_7: LED, 1=active */ static const struct em28xx_reg_seq pctv_510e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ { -1, -1, -1, -1}, }; /* * 2013:0251 PCTV QuatroStick nano (520e) * GPIO_2: decoder reset, 0=active * GPIO_4: decoder suspend, 0=active * GPIO_6: demod reset, 0=active * GPIO_7: LED, 1=active */ static const struct em28xx_reg_seq pctv_520e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */ { -1, -1, -1, -1}, }; /* * 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam * reg 0x80/0x84: * GPIO_0: capturing LED, 0=on, 1=off * GPIO_2: AV mute button, 0=pressed, 1=unpressed * GPIO 3: illumination button, 0=pressed, 1=unpressed * GPIO_6: illumination/flash LED, 0=on, 1=off * reg 0x81/0x85: * GPIO_7: snapshot button, 0=pressed, 1=unpressed */ static const struct em28xx_reg_seq speedlink_vad_laplace_reg_seq[] = { {EM2820_R08_GPIO_CTRL, 0xf7, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xb2, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq pctv_292e[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 950}, {EM2874_R80_GPIO_P0_CTRL, 0xbd, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 410}, {EM2874_R80_GPIO_P0_CTRL, 0x7d, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0x7c, 0xff, 
60}, {0x0d, 0x42, 0xff, 50}, {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0}, {-1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_t2_stick_hd[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 600}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xbc, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0xf8, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 300}, {0x0d, 0x42, 0xff, 1000}, {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0}, {-1, -1, -1, -1}, }; static const struct em28xx_reg_seq plex_px_bcud[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 0}, {EM2874_R50_IR_CONFIG, 0x01, 0xff, 0}, {EM28XX_R06_I2C_CLK, 0x40, 0xff, 0}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 100}, {EM28XX_R12_VINENABLE, 0x20, 0x20, 0}, {0x0d, 0x42, 0xff, 1000}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10}, {0x73, 0xfd, 0xff, 100}, {-1, -1, -1, -1}, }; /* * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk * reg 0x80/0x84: * GPIO_0: Yellow LED tuner 1, 0=on, 1=off * GPIO_1: Green LED tuner 1, 0=on, 1=off * GPIO_2: Yellow LED tuner 2, 0=on, 1=off * GPIO_3: Green LED tuner 2, 0=on, 1=off * GPIO_5: Reset #2, 0=active * GPIO_6: Reset #1, 0=active */ static const struct em28xx_reg_seq hauppauge_dualhd_dvb[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 200}, {0x50, 0x04, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0xbf, 0xff, 100}, /* demod 1 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xdf, 0xff, 100}, /* demod 2 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, {EM2874_R5F_TS_ENABLE, 0x00, 0xff, 50}, /* disable TS filters */ {EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50}, {EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50}, {-1, -1, -1, -1}, }; /* Hauppauge USB QuadHD */ static struct em28xx_reg_seq hauppauge_usb_quadhd_atsc_reg_seq[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 200}, {0x50, 0x04, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xf0, 100}, /* demod 1 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xf0, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xd0, 0xf0, 100}, /* demod 2 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xf0, 100}, {EM2874_R5F_TS_ENABLE, 0x44, 0xff, 50}, {EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50}, {EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50}, {-1, -1, -1, -1}, }; /* * MyGica USB TV Box * GPIO_1,0: 00=Composite audio * 01=Tuner audio * 10=Mute audio * 11=FM radio? (if equipped) * GPIO_2-6: Unused * GPIO_7: ?? 
*/ static const struct em28xx_reg_seq mygica_utv3_composite_audio_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xfc, 0xff, 0}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq mygica_utv3_tuner_audio_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xfd, 0xff, 0}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq mygica_utv3_suspend_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 0}, { -1, -1, -1, -1}, }; /* * Button definitions */ static const struct em28xx_button std_snapshot_button[] = { { .role = EM28XX_BUTTON_SNAPSHOT, .reg_r = EM28XX_R0C_USBSUSP, .reg_clearing = EM28XX_R0C_USBSUSP, .mask = EM28XX_R0C_USBSUSP_SNAPSHOT, .inverted = 0, }, {-1, 0, 0, 0, 0}, }; static const struct em28xx_button speedlink_vad_laplace_buttons[] = { { .role = EM28XX_BUTTON_SNAPSHOT, .reg_r = EM2874_R85_GPIO_P1_STATE, .mask = 0x80, .inverted = 1, }, { .role = EM28XX_BUTTON_ILLUMINATION, .reg_r = EM2874_R84_GPIO_P0_STATE, .mask = 0x08, .inverted = 1, }, {-1, 0, 0, 0, 0}, }; /* * LED definitions */ static struct em28xx_led speedlink_vad_laplace_leds[] = { { .role = EM28XX_LED_ANALOG_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x01, .inverted = 1, }, { .role = EM28XX_LED_ILLUMINATION, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x40, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led kworld_ub435q_v3_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x80, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led pctv_80e_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x80, .inverted = 0, }, {-1, 0, 0, 0}, }; static struct em28xx_led terratec_grabby_leds[] = { { .role = EM28XX_LED_ANALOG_CAPTURING, .gpio_reg = EM2820_R08_GPIO_CTRL, .gpio_mask = EM_GPIO_3, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led hauppauge_dualhd_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_1, .inverted = 1, }, { .role = EM28XX_LED_DIGITAL_CAPTURING_TS2, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_3, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led hauppauge_usb_quadhd_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_2, .inverted = 1, }, { .role = EM28XX_LED_DIGITAL_CAPTURING_TS2, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_0, .inverted = 1, }, {-1, 0, 0, 0}, }; /* * Board definitions */ const struct em28xx_board em28xx_boards[] = { [EM2750_BOARD_UNKNOWN] = { .name = "EM2710/EM2750/EM2751 webcam grabber", .xclk = EM28XX_XCLK_FREQUENCY_20MHZ, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, .gpio = silvercrest_reg_seq, } }, }, [EM2800_BOARD_UNKNOWN] = { .name = "Unknown EM2800 video grabber", .is_em2800 = 1, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_UNKNOWN] = { .name = "Unknown EM2750/28xx video grabber", .tuner_type = TUNER_ABSENT, .is_webcam = 1, /* To enable sensor probe */ }, [EM2882_BOARD_ZOLID_HYBRID_TV_STICK] = { .name = ":ZOLID HYBRID TV STICK", .tuner_type = TUNER_XC2028, .tuner_gpio = zolid_tuner, .decoder = EM28XX_TVP5150, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = 
zolid_digital, }, [EM2750_BOARD_DLCW_130] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "Huaqi DLCW-130", .valid = EM28XX_BOARD_NOT_VALIDATED, .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2820_BOARD_KWORLD_PVRTV2800RF] = { .name = "Kworld PVR TV 2800 RF", .tuner_type = TUNER_TEMIC_PAL, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_GADMEI_TVR200] = { .name = "Gadmei TVR200", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_TERRATEC_CINERGY_250] = { .name = "Terratec Cinergy 250 USB", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .has_ir_i2c = 1, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PINNACLE_USB_2] = { .name = "Pinnacle PCTV USB 2", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .has_ir_i2c = 1, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_HAUPPAUGE_WINTV_USB_2] = { .name = "Hauppauge WinTV USB 2", .tuner_type = TUNER_PHILIPS_FM1236_MK3, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .decoder = EM28XX_TVP5150, .has_msp34xx = 1, .has_ir_i2c = 1, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = MSP_INPUT_DEFAULT, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, MSP_DSP_IN_SCART, MSP_DSP_IN_SCART), } }, }, [EM2820_BOARD_DLINK_USB_TV] = { .name = "D-Link DUB-T210 TV Tuner", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_HERCULES_SMART_TV_USB2] = { .name = "Hercules Smart TV USB 2.0", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, 
.amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PINNACLE_USB_2_FM1216ME] = { .name = "Pinnacle PCTV USB 2 (Philips FM1216ME)", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_GADMEI_UTV310] = { .name = "Gadmei UTV310", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_TNF_5335MF, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE] = { .name = "Leadtek Winfast USB II Deluxe", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .has_ir_i2c = 1, .tvaudio_addr = 0x58, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT2_ACTIVE | TDA9887_QSS, .decoder = EM28XX_SAA711X, .adecoder = EM28XX_TVAUDIO, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE4, .amux = EM28XX_AMUX_AUX, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE5, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, .radio = { .type = EM28XX_RADIO, .amux = EM28XX_AMUX_AUX, } }, [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { .name = "Videology 20K14XUSB USB2.0", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2820_BOARD_SILVERCREST_WEBCAM] = { .name = "Silvercrest Webcam 1.3mpix", .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, .gpio = silvercrest_reg_seq, } }, }, [EM2821_BOARD_SUPERCOMP_USB_2] = { .name = "Supercomp USB 2.0 TV", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_PHILIPS_FM1236_MK3, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2821_BOARD_USBGEAR_VD204] = { .name = "Usbgear VD204v9", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_NETGMBH_CAM] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "NetGMBH Cam", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2860_BOARD_TYPHOON_DVD_MAKER] = { .name = "Typhoon DVD Maker", .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* Capture only device */ .input = { 
{ .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_GADMEI_UTV330] = { .name = "Gadmei UTV330", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_TNF_5335MF, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2861_BOARD_GADMEI_UTV330PLUS] = { .name = "Gadmei UTV330+", .tuner_type = TUNER_TNF_5335MF, .tda9887_conf = TDA9887_PRESENT, .ir_codes = RC_MAP_GADMEI_RM008Z, .decoder = EM28XX_SAA711X, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_TERRATEC_HYBRID_XS] = { .name = "Terratec Cinergy A Hybrid XS", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2861_BOARD_KWORLD_PVRTV_300U] = { .name = "KWorld PVRTV 300U", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2861_BOARD_YAKUMO_MOVIE_MIXER] = { .name = "Yakumo MovieMixer", .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_TVP5150_REFERENCE_DESIGN] = { .name = "EM2860/TVP5150 Reference Design", .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2861_BOARD_PLEXTOR_PX_TV100U] = { .name = "Plextor ConvertX PX-TV100U", .tuner_type = TUNER_TNF_5335MF, .xclk = EM28XX_XCLK_I2S_MSB_TIMING | EM28XX_XCLK_FREQUENCY_12MHZ, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_TVP5150, .has_msp34xx = 1, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = 
pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, } }, }, /* Those boards with em2870 are DVB Only*/ [EM2870_BOARD_TERRATEC_XS] = { .name = "Terratec Cinergy T XS", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_TERRATEC_XS_MT2060] = { .name = "Terratec Cinergy T XS (MT2060)", .xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_12MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .tuner_type = TUNER_ABSENT, /* MT2060 */ .has_dvb = 1, .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_KWORLD_350U] = { .name = "Kworld 350 U DVB-T", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_KWORLD_355U] = { .name = "Kworld 355 U DVB-T", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, .tuner_gpio = default_tuner_gpio, .has_dvb = 1, .dvb_gpio = default_digital, }, [EM2870_BOARD_PINNACLE_PCTV_DVB] = { .name = "Pinnacle PCTV DVB-T", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, /* MT2060 */ /* djh - I have serious doubts this is right... */ .xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_10MHZ, }, [EM2870_BOARD_COMPRO_VIDEOMATE] = { .name = "Compro, VideoMate U3", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, /* MT2060 */ }, [EM2880_BOARD_TERRATEC_HYBRID_XS_FR] = { .name = "Terratec Hybrid XS Secam", .has_msp34xx = 1, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = terratec_cinergy_USB_XS_FR_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = terratec_cinergy_USB_XS_FR_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_cinergy_USB_XS_FR_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_cinergy_USB_XS_FR_analog, } }, }, [EM2884_BOARD_TERRATEC_H5] = { .name = "Terratec Cinergy H5", .has_dvb = 1, #if 0 .tuner_type = TUNER_PHILIPS_TDA8290, .tuner_addr = 0x41, .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */ .tuner_gpio = terratec_h5_gpio, #else .tuner_type = TUNER_ABSENT, #endif .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_TERRATEC_H6] = { .name = "Terratec Cinergy H6 rev. 
2", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, #if 0 .tuner_type = TUNER_PHILIPS_TDA8290, .tuner_addr = 0x41, .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */ .tuner_gpio = terratec_h5_gpio, #else .tuner_type = TUNER_ABSENT, #endif .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = { .name = "Hauppauge WinTV HVR 930C", .has_dvb = 1, #if 0 /* FIXME: Add analog support */ .tuner_type = TUNER_XC5000, .tuner_addr = 0x41, .dvb_gpio = hauppauge_930c_digital, .tuner_gpio = hauppauge_930c_gpio, #else .tuner_type = TUNER_ABSENT, #endif .ir_codes = RC_MAP_HAUPPAUGE, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_C3TECH_DIGITAL_DUO] = { .name = "C3 Tech Digital Duo HDTV/SDTV USB", .has_dvb = 1, /* FIXME: Add analog support - need a saa7136 driver */ .tuner_type = TUNER_ABSENT, /* Digital-only TDA18271HD */ .ir_codes = RC_MAP_EMPTY, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .dvb_gpio = c3tech_digital_duo_digital, }, [EM2884_BOARD_CINERGY_HTC_STICK] = { .name = "Terratec Cinergy HTC Stick", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .tuner_type = TUNER_ABSENT, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_ELGATO_EYETV_HYBRID_2008] = { .name = "Elgato EyeTV Hybrid 2008 INT", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .tuner_type = TUNER_ABSENT, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = { .name = "Hauppauge WinTV HVR 900", .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2] = { .name = "Hauppauge WinTV HVR 900 (R2)", .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900R2_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850] = { .name = "Hauppauge WinTV HVR 850", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, 
.gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950] = { .name = "Hauppauge WinTV HVR 950", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_PINNACLE_PCTV_HD_PRO] = { .name = "Pinnacle PCTV HD Pro Stick", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600] = { .name = "AMD ATI TV Wonder HD 600", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_ATI_TV_WONDER_HD_600, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_TERRATEC_HYBRID_XS] = { .name = "Terratec Hybrid XS", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = default_digital, .ir_codes = RC_MAP_TERRATEC_CINERGY_XS, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, /* * maybe there's a reason behind it why Terratec sells the Hybrid XS * as Prodigy XS with a different PID, let's keep it separated for now * maybe we'll need it later on */ [EM2880_BOARD_TERRATEC_PRODIGY_XS] = { .name = "Terratec Prodigy XS", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, 
.amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2820_BOARD_MSI_VOX_USB_2] = { .name = "MSI VOX USB 2.0", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .max_range_640_480 = 1, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE4, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_TERRATEC_CINERGY_200] = { .name = "Terratec Cinergy 200 USB", .is_em2800 = 1, .has_ir_i2c = 1, .tuner_type = TUNER_LG_TALN, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_GRABBEEX_USB2800] = { .name = "eMPIA Technology, Inc. GrabBeeX+ Video Encoder", .is_em2800 = 1, .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* capture only board */ .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_VC211A] = { .name = "Actionmaster/LinXcel/Digitus VC211A", .is_em2800 = 1, .tuner_type = TUNER_ABSENT, /* Capture-only board */ .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = vc211a_enable, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, .gpio = vc211a_enable, } }, }, [EM2800_BOARD_LEADTEK_WINFAST_USBII] = { .name = "Leadtek Winfast USB II", .is_em2800 = 1, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_KWORLD_USB2800] = { .name = "Kworld USB2800", .is_em2800 = 1, .tuner_type = TUNER_PHILIPS_FCV1236D, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PINNACLE_DVC_90] = { .name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U", .tuner_type = TUNER_ABSENT, /* capture only board */ .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_VGEAR_POCKETTV] = { .name = "V-Gear PocketTV", .is_em2800 = 1, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = 
EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2] = { .name = "Pixelview PlayTV Box 4 USB 2.0", .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_YMEC_TVF_5533MF, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .aout = EM28XX_AOUT_MONO | /* I2S */ EM28XX_AOUT_MASTER, /* Line out pin */ }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PROLINK_PLAYTV_USB2] = { .name = "SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0", .buttons = std_snapshot_button, .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_YMEC_TVF_5533MF, .tuner_addr = 0x60, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .aout = EM28XX_AOUT_MONO | /* I2S */ EM28XX_AOUT_MASTER, /* Line out pin */ }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = { .name = "EM2860/SAA711X Reference Design", .buttons = std_snapshot_button, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, } }, }, [EM2874_BOARD_LEADERSHIP_ISDBT] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .xclk = EM28XX_XCLK_FREQUENCY_10MHZ, .name = "EM2874 Leadership ISDBT", .tuner_type = TUNER_ABSENT, .tuner_gpio = leadership_reset, .dvb_gpio = leadership_digital, .has_dvb = 1, }, [EM2880_BOARD_MSI_DIGIVOX_AD] = { .name = "MSI DigiVox A/D", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, } }, }, [EM2880_BOARD_MSI_DIGIVOX_AD_II] = { .name = "MSI DigiVox A/D II", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, } }, }, [EM2880_BOARD_KWORLD_DVB_305U] = { .name = "KWorld DVB-T 305U", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = 
EM28XX_AMUX_LINE_IN, } }, }, [EM2880_BOARD_KWORLD_DVB_310U] = { .name = "KWorld DVB-T 310U", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .has_dvb = 1, .dvb_gpio = default_digital, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { /* S-video has not been tested yet */ .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, [EM2882_BOARD_KWORLD_ATSC_315U] = { .name = "KWorld ATSC 315U HDTV TV Box", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_THOMSON_DTT761X, .tuner_gpio = em2882_kworld_315u_tuner_gpio, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .has_dvb = 1, .dvb_gpio = em2882_kworld_315u_digital, .ir_codes = RC_MAP_KWORLD_315U, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, #if 0 /* FIXME: Analog mode - still not ready */ .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .gpio = em2882_kworld_315u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2882_kworld_315u_analog1, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2882_kworld_315u_analog1, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, } }, #endif }, [EM2880_BOARD_EMPIRE_DUAL_TV] = { .name = "Empire dual TV", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .has_dvb = 1, .dvb_gpio = default_digital, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, [EM2881_BOARD_DNT_DA2_HYBRID] = { .name = "DNT DA2 Hybrid", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { .name = "Pinnacle Hybrid Pro", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = pinnacle_hybrid_pro_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, } }, }, [EM2882_BOARD_PINNACLE_HYBRID_PRO_330E] = { .name = "Pinnacle Hybrid Pro (330e)", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 
1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900R2_digital, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2882_BOARD_KWORLD_VS_DVBT] = { .name = "Kworld VS-DVB-T 323UR", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = kworld_330u_digital, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ .ir_codes = RC_MAP_KWORLD_315U, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2882_BOARD_TERRATEC_HYBRID_XS] = { .name = "Terratec Cinergy Hybrid T USB XS (em2882)", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_TERRATEC_CINERGY_XS, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2882_BOARD_DIKOM_DK300] = { .name = "Dikom DK300", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = dikom_dk300_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, } }, }, [EM2883_BOARD_KWORLD_HYBRID_330U] = { .name = "Kworld PlusTV HD Hybrid 330", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = kworld_330u_digital, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_EEPROM_ON_BOARD | EM28XX_I2C_EEPROM_KEY_VALID, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = kworld_330u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = kworld_330u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = kworld_330u_analog, } }, }, [EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU] = { .name = "Compro VideoMate ForYou/Stereo", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tvaudio_addr = 0xb0, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_TVP5150, .adecoder = EM28XX_TVAUDIO, .mute_gpio = compro_mute_gpio, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = compro_unmute_tv_gpio, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = 
compro_unmute_svid_gpio, } }, }, [EM2860_BOARD_KAIOMY_TVNPC_U2] = { .name = "Kaiomy TVnPC U2", .vchannels = 3, .tuner_type = TUNER_XC2028, .tuner_addr = 0x61, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .tuner_gpio = default_tuner_gpio, .ir_codes = RC_MAP_KAIOMY, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, .radio = { .type = EM28XX_RADIO, .amux = EM28XX_AMUX_LINE_IN, } }, [EM2860_BOARD_EASYCAP] = { .name = "Easy Cap Capture DC-60", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_IODATA_GVMVP_SZ] = { .name = "IO-DATA GV-MVP/SZ", .tuner_type = TUNER_PHILIPS_FM1236_MK3, .tuner_gpio = default_tuner_gpio, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { /* Composite has not been tested yet */ .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_VIDEO, }, { /* S-video has not been tested yet */ .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2860_BOARD_TERRATEC_GRABBY] = { .name = "Terratec Grabby", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, .buttons = std_snapshot_button, .leds = terratec_grabby_leds, }, [EM2860_BOARD_TERRATEC_AV350] = { .name = "Terratec AV350", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_TVP5150, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .mute_gpio = terratec_av350_mute_gpio, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, } }, }, [EM2860_BOARD_ELGATO_VIDEO_CAPTURE] = { .name = "Elgato Video Capture", .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* Capture only device */ .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2882_BOARD_EVGA_INDTUBE] = { .name = "Evga inDtube", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = evga_indtube_digital, .ir_codes = RC_MAP_EVGA_INDTUBE, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = evga_indtube_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = evga_indtube_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = evga_indtube_analog, } }, }, /* * eb1a:2868 Empia EM2870 + Philips CU1216L NIM * (Philips TDA10023 + Infineon TUA6034) */ [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = 
{ .name = "Reddo DVB-C USB TV Box", .tuner_type = TUNER_ABSENT, .tuner_gpio = reddo_dvb_c_usb_box, .has_dvb = 1, }, /* * 1b80:a340 - Empia EM2870, NXP TDA18271HD and LG DT3304, sold * initially as the KWorld PlusTV 340U, then as the UB435-Q. * Early variants have a TDA18271HD/C1, later ones a TDA18271HD/C2 */ [EM2870_BOARD_KWORLD_A340] = { .name = "KWorld PlusTV 340U or UB435-Q (ATSC)", .tuner_type = TUNER_ABSENT, /* Digital-only TDA18271HD */ .has_dvb = 1, .dvb_gpio = kworld_a340_digital, .tuner_gpio = default_tuner_gpio, }, /* * 2013:024f PCTV nanoStick T2 290e. * Empia EM28174, Sony CXD2820R and NXP TDA18271HD/C2 */ [EM28174_BOARD_PCTV_290E] = { .name = "PCTV nanoStick T2 290e", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_290e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * 2013:024f PCTV DVB-S2 Stick 460e * Empia EM28174, NXP TDA10071, Conexant CX24118A and Allegro A8293 */ [EM28174_BOARD_PCTV_460E] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .name = "PCTV DVB-S2 Stick (460e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_460e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * eb1a:5006 Honestech VIDBOX NW03 * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */ [EM2860_BOARD_HT_VIDBOX_NW03] = { .name = "Honestech Vidbox NW03", .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, /* S-VIDEO needs check */ .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * 1b80:e425 MaxMedia UB425-TC * Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2 */ [EM2874_BOARD_MAXMEDIA_UB425_TC] = { .name = "MaxMedia UB425-TC", .tuner_type = TUNER_ABSENT, .tuner_gpio = maxmedia_ub425_tc, .has_dvb = 1, .ir_codes = RC_MAP_REDDO, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 2304:0242 PCTV QuatroStick (510e) * Empia EM2884 + Micronas DRX 3926K + NXP TDA18271HDC2 */ [EM2884_BOARD_PCTV_510E] = { .name = "PCTV QuatroStick (510e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_510e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 2013:0251 PCTV QuatroStick nano (520e) * Empia EM2884 + Micronas DRX 3926K + NXP TDA18271HDC2 */ [EM2884_BOARD_PCTV_520E] = { .name = "PCTV QuatroStick nano (520e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_520e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_TERRATEC_HTC_USB_XS] = { .name = "Terratec Cinergy HTC USB XS", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .tuner_type = TUNER_ABSENT, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 1b80:e1cc Delock 61959 * Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2 * mostly the same as MaxMedia UB-425-TC but different remote */ [EM2874_BOARD_DELOCK_61959] = { .name = "Delock 61959", .tuner_type = TUNER_ABSENT, .tuner_gpio = maxmedia_ub425_tc, .has_dvb = 1, .ir_codes = RC_MAP_DELOCK_61959, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 1b80:e346 KWorld USB ATSC TV Stick UB435-Q V2 * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2 */ [EM2874_BOARD_KWORLD_UB435Q_V2] = { .name = 
"KWorld USB ATSC TV Stick UB435-Q V2", .tuner_type = TUNER_ABSENT, .has_dvb = 1, .dvb_gpio = kworld_a340_digital, .tuner_gpio = default_tuner_gpio, .def_i2c_bus = 1, }, /* * 1b80:e34c KWorld USB ATSC TV Stick UB435-Q V3 * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2 */ [EM2874_BOARD_KWORLD_UB435Q_V3] = { .name = "KWorld USB ATSC TV Stick UB435-Q V3", .tuner_type = TUNER_ABSENT, .has_dvb = 1, .tuner_gpio = kworld_ub435q_v3_digital, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .leds = kworld_ub435q_v3_leds, }, [EM2874_BOARD_PCTV_HD_MINI_80E] = { .name = "Pinnacle PCTV HD Mini", .tuner_type = TUNER_ABSENT, .has_dvb = 1, .dvb_gpio = em2874_pctv_80e_digital, .decoder = EM28XX_NODECODER, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .leds = pctv_80e_leds, }, /* * 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam * Empia EM2765 + OmniVision OV2640 */ [EM2765_BOARD_SPEEDLINK_VAD_LAPLACE] = { .name = "SpeedLink Vicious And Devine Laplace webcam", .xclk = EM28XX_XCLK_FREQUENCY_24MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .def_i2c_bus = 1, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .amux = EM28XX_AMUX_VIDEO, .gpio = speedlink_vad_laplace_reg_seq, } }, .buttons = speedlink_vad_laplace_buttons, .leds = speedlink_vad_laplace_leds, }, /* * 2013:0258 PCTV DVB-S2 Stick (461e) * Empia EM28178, Montage M88DS3103, Montage M88TS2022, Allegro A8293 */ [EM28178_BOARD_PCTV_461E] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .name = "PCTV DVB-S2 Stick (461e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_461e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * 2013:0259 PCTV DVB-S2 Stick (461e_v2) * Empia EM28178, Montage M88DS3103b, Montage M88TS2022, Allegro A8293 */ [EM28178_BOARD_PCTV_461E_V2] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .name = "PCTV DVB-S2 Stick (461e v2)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_461e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * 2013:025f PCTV tripleStick (292e). * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2157 */ [EM28178_BOARD_PCTV_292E] = { .name = "PCTV tripleStick (292e)", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_292e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, [EM2861_BOARD_LEADTEK_VC100] = { .name = "Leadtek VC100", .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * eb1a:8179 Terratec Cinergy T2 Stick HD. * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2146 */ [EM28178_BOARD_TERRATEC_T2_STICK_HD] = { .name = "Terratec Cinergy T2 Stick HD", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = terratec_t2_stick_hd, .has_dvb = 1, .ir_codes = RC_MAP_TERRATEC_SLIM_2, }, /* * 3275:0085 PLEX PX-BCUD. 
* Empia EM28178, TOSHIBA TC90532XBG, Sharp QM1D1C0042 */ [EM28178_BOARD_PLEX_PX_BCUD] = { .name = "PLEX PX-BCUD", .xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .tuner_type = TUNER_ABSENT, .tuner_gpio = plex_px_bcud, .has_dvb = 1, }, /* * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc. * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk. * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = { .name = "Hauppauge WinTV-dualHD DVB", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_SI2157, .tuner_gpio = hauppauge_dualhd_dvb, .has_dvb = 1, .has_dual_ts = 1, .ir_codes = RC_MAP_HAUPPAUGE, .leds = hauppauge_dualhd_leds, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc. * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk. * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = { .name = "Hauppauge WinTV-dualHD 01595 ATSC/QAM", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = hauppauge_dualhd_dvb, .has_dvb = 1, .has_dual_ts = 1, .ir_codes = RC_MAP_HAUPPAUGE, .leds = hauppauge_dualhd_leds, }, /* * 1b80:e349 Magix USB Videowandler-2 * (same chips as Honestech VIDBOX NW03) * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */ [EM2861_BOARD_MAGIX_VIDEOWANDLER2] = { .name = "Magix USB Videowandler-2", .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * 1f4d:1abe MyGica iGrabber * (same as several other EM2860 devices) * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */ [EM2860_BOARD_MYGICA_IGRABBER] = { .name = "MyGica iGrabber", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* 2040:826d Hauppauge USB QuadHD * Empia 28274, Max Linear 692 ATSC combo demod/tuner */ [EM2874_BOARD_HAUPPAUGE_USB_QUADHD] = { .name = "Hauppauge USB QuadHD ATSC", .def_i2c_bus = 1, .has_dual_ts = 1, .has_dvb = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = hauppauge_usb_quadhd_atsc_reg_seq, .leds = hauppauge_usb_quadhd_leds, }, /* * eb1a:2860 MyGica UTV3 Analog USB2.0 TV Box * Empia EM2860, Philips SAA7113, NXP TDA9801T demod, * Tena TNF931D-DFDR1 tuner (contains NXP TDA6509A), * ST HCF4052 demux (switches audio to line out), * no audio over USB */ [EM2860_BOARD_MYGICA_UTV3] = { .name = "MyGica UTV3 Analog USB2.0 TV Box", .xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_12MHZ, .tuner_type = TUNER_TENA_TNF_931D_DFDR1, .ir_codes = RC_MAP_MYGICA_UTV3, .decoder = EM28XX_SAA711X, .suspend_gpio = mygica_utv3_suspend_gpio, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = mygica_utv3_composite_audio_gpio, }, { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .gpio = mygica_utv3_tuner_audio_gpio, } }, }, }; 
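/*
 * How the table above is consumed (summary): em28xx_set_model() copies
 * em28xx_boards[dev->model] into dev->board. dev->model normally comes from
 * the .driver_info field of em28xx_id_table below; it can also be forced with
 * the card=<n> module parameter or, for generic USB IDs, guessed from the
 * eeprom/i2c hashes in em28xx_hint_board().
 */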
EXPORT_SYMBOL_GPL(em28xx_boards); static const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); /* table of devices that work with this driver */ struct usb_device_id em28xx_id_table[] = { { USB_DEVICE(0xeb1a, 0x2750), .driver_info = EM2750_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2751), .driver_info = EM2750_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2800), .driver_info = EM2800_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2710), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2820), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2821), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2860), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2861), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2862), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2863), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2870), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2881), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2883), /* used by :Zolid Hybrid Tv Stick */ .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2868), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2875), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2885), /* MSI Digivox Trio */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0xeb1a, 0xe300), .driver_info = EM2861_BOARD_KWORLD_PVRTV_300U }, { USB_DEVICE(0xeb1a, 0xe303), .driver_info = EM2860_BOARD_KAIOMY_TVNPC_U2 }, { USB_DEVICE(0xeb1a, 0xe305), .driver_info = EM2880_BOARD_KWORLD_DVB_305U }, { USB_DEVICE(0xeb1a, 0xe310), .driver_info = EM2880_BOARD_MSI_DIGIVOX_AD }, { USB_DEVICE(0xeb1a, 0xa313), .driver_info = EM2882_BOARD_KWORLD_ATSC_315U }, { USB_DEVICE(0xeb1a, 0xa316), .driver_info = EM2883_BOARD_KWORLD_HYBRID_330U }, { USB_DEVICE(0xeb1a, 0xe320), .driver_info = EM2880_BOARD_MSI_DIGIVOX_AD_II }, { USB_DEVICE(0xeb1a, 0xe323), .driver_info = EM2882_BOARD_KWORLD_VS_DVBT }, { USB_DEVICE(0xeb1a, 0xe350), .driver_info = EM2870_BOARD_KWORLD_350U }, { USB_DEVICE(0xeb1a, 0xe355), .driver_info = EM2870_BOARD_KWORLD_355U }, { USB_DEVICE(0xeb1a, 0x2801), .driver_info = EM2800_BOARD_GRABBEEX_USB2800 }, { USB_DEVICE(0xeb1a, 0xe357), .driver_info = EM2870_BOARD_KWORLD_355U }, { USB_DEVICE(0xeb1a, 0xe359), .driver_info = EM2870_BOARD_KWORLD_355U }, { USB_DEVICE(0x1b80, 0xe302), /* Kaiser Baas Video to DVD maker */ .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x1b80, 0xe304), /* Kworld DVD Maker 2 */ .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x0ccd, 0x0036), .driver_info = EM2820_BOARD_TERRATEC_CINERGY_250 }, { USB_DEVICE(0x0ccd, 0x004c), .driver_info = EM2880_BOARD_TERRATEC_HYBRID_XS_FR }, { USB_DEVICE(0x0ccd, 0x004f), .driver_info = EM2860_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x005e), .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x0042), .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x0043), .driver_info = EM2870_BOARD_TERRATEC_XS_MT2060 }, { USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS }, { USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 2 */ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS }, { USB_DEVICE(0x0ccd, 0x10a2), /* H5 Rev. 1 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 
3 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10b2), /* H6 */ .driver_info = EM2884_BOARD_TERRATEC_H6 }, { USB_DEVICE(0x0ccd, 0x0084), .driver_info = EM2860_BOARD_TERRATEC_AV350 }, { USB_DEVICE(0x0ccd, 0x0096), .driver_info = EM2860_BOARD_TERRATEC_GRABBY }, { USB_DEVICE(0x0ccd, 0x10AF), .driver_info = EM2860_BOARD_TERRATEC_GRABBY }, { USB_DEVICE(0x0ccd, 0x00b2), .driver_info = EM2884_BOARD_CINERGY_HTC_STICK }, { USB_DEVICE(0x0fd9, 0x0018), .driver_info = EM2884_BOARD_ELGATO_EYETV_HYBRID_2008 }, { USB_DEVICE(0x0fd9, 0x0033), .driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE }, { USB_DEVICE(0x185b, 0x2870), .driver_info = EM2870_BOARD_COMPRO_VIDEOMATE }, { USB_DEVICE(0x185b, 0x2041), .driver_info = EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU }, { USB_DEVICE(0x2040, 0x4200), .driver_info = EM2820_BOARD_HAUPPAUGE_WINTV_USB_2 }, { USB_DEVICE(0x2040, 0x4201), .driver_info = EM2820_BOARD_HAUPPAUGE_WINTV_USB_2 }, { USB_DEVICE(0x2040, 0x6500), .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 }, { USB_DEVICE(0x2040, 0x6502), .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2 }, { USB_DEVICE(0x2040, 0x6513), /* HCW HVR-980 */ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, { USB_DEVICE(0x2040, 0x6517), /* HP HVR-950 */ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, { USB_DEVICE(0x2040, 0x651b), /* RP HVR-950 */ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, { USB_DEVICE(0x2040, 0x651f), .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 }, { USB_DEVICE(0x2040, 0x0265), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, { USB_DEVICE(0x2040, 0x8265), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, { USB_DEVICE(0x2040, 0x026d), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, { USB_DEVICE(0x2040, 0x826d), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, { USB_DEVICE(0x2040, 0x846d), .driver_info = EM2874_BOARD_HAUPPAUGE_USB_QUADHD }, { USB_DEVICE(0x0438, 0xb002), .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 }, { USB_DEVICE(0x2001, 0xf112), .driver_info = EM2820_BOARD_DLINK_USB_TV }, { USB_DEVICE(0x2304, 0x0207), .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x2304, 0x0208), .driver_info = EM2820_BOARD_PINNACLE_USB_2 }, { USB_DEVICE(0x2304, 0x021a), .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x2304, 0x0226), .driver_info = EM2882_BOARD_PINNACLE_HYBRID_PRO_330E }, { USB_DEVICE(0x2304, 0x0227), .driver_info = EM2880_BOARD_PINNACLE_PCTV_HD_PRO }, { USB_DEVICE(0x2304, 0x023f), .driver_info = EM2874_BOARD_PCTV_HD_MINI_80E }, { USB_DEVICE(0x0413, 0x6023), .driver_info = EM2800_BOARD_LEADTEK_WINFAST_USBII }, { USB_DEVICE(0x093b, 0xa003), .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x093b, 0xa005), .driver_info = EM2861_BOARD_PLEXTOR_PX_TV100U }, { USB_DEVICE(0x04bb, 0x0515), .driver_info = EM2820_BOARD_IODATA_GVMVP_SZ }, { USB_DEVICE(0xeb1a, 0x50a6), .driver_info = EM2860_BOARD_GADMEI_UTV330 }, { USB_DEVICE(0x1b80, 0xa340), .driver_info = EM2870_BOARD_KWORLD_A340 }, { USB_DEVICE(0x1b80, 0xe346), .driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 }, { USB_DEVICE(0x1b80, 0xe34c), .driver_info = EM2874_BOARD_KWORLD_UB435Q_V3 }, { USB_DEVICE(0x2013, 0x024f), .driver_info = EM28174_BOARD_PCTV_290E }, { USB_DEVICE(0x2013, 0x024c), .driver_info = EM28174_BOARD_PCTV_460E }, { USB_DEVICE(0x2040, 0x1605), .driver_info = EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C }, { USB_DEVICE(0x1b80, 0xe755), .driver_info = EM2884_BOARD_C3TECH_DIGITAL_DUO }, { USB_DEVICE(0xeb1a, 0x5006), .driver_info = 
EM2860_BOARD_HT_VIDBOX_NW03 }, { USB_DEVICE(0x1b80, 0xe309), /* Sveon STV40 */ .driver_info = EM2860_BOARD_EASYCAP }, { USB_DEVICE(0x1b80, 0xe425), .driver_info = EM2874_BOARD_MAXMEDIA_UB425_TC }, { USB_DEVICE(0x1f4d, 0x1abe), .driver_info = EM2860_BOARD_MYGICA_IGRABBER }, { USB_DEVICE(0x2304, 0x0242), .driver_info = EM2884_BOARD_PCTV_510E }, { USB_DEVICE(0x2013, 0x0251), .driver_info = EM2884_BOARD_PCTV_520E }, { USB_DEVICE(0x1b80, 0xe1cc), .driver_info = EM2874_BOARD_DELOCK_61959 }, { USB_DEVICE(0x1ae7, 0x9003), .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE }, { USB_DEVICE(0x1ae7, 0x9004), .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE }, { USB_DEVICE(0x2013, 0x0258), .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x8258), /* Bulk transport 461e */ .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x0461), .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x8461), /* Bulk transport 461e v2 */ .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x0259), .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x025f), .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2013, 0x0264), /* Hauppauge WinTV-soloHD 292e SE */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x0413, 0x6f07), .driver_info = EM2861_BOARD_LEADTEK_VC100 }, { USB_DEVICE(0xeb1a, 0x8179), .driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD }, { USB_DEVICE(0x3275, 0x0085), .driver_info = EM28178_BOARD_PLEX_PX_BCUD }, { USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */ .driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN }, { USB_DEVICE(0x1b80, 0xe349), /* Magix USB Videowandler-2 */ .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 }, { }, }; MODULE_DEVICE_TABLE(usb, em28xx_id_table); /* * EEPROM hash table for devices with generic USB IDs */ static const struct em28xx_hash_table em28xx_eeprom_hash[] = { /* P/N: SA 60002070465 Tuner: TVF7533-MF */ {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF}, {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, {0x166a0441, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, {0xb8846b20, EM2881_BOARD_PINNACLE_HYBRID_PRO, TUNER_XC2028}, {0x63f653bd, EM2870_BOARD_REDDO_DVB_C_USB_BOX, TUNER_ABSENT}, {0x4e913442, EM2882_BOARD_DIKOM_DK300, TUNER_XC2028}, {0x85dd871e, EM2882_BOARD_ZOLID_HYBRID_TV_STICK, TUNER_XC2028}, {0x8f597549, EM2860_BOARD_MYGICA_UTV3, TUNER_TENA_TNF_931D_DFDR1}, }; /* I2C devicelist hash table for devices with generic USB IDs */ static const struct em28xx_hash_table em28xx_i2c_hash[] = { {0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC}, {0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC}, {0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT}, {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT}, {0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC}, {0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF}, {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT}, {0x27e10080, 
EM2882_BOARD_ZOLID_HYBRID_TV_STICK, TUNER_XC2028}, {0x840d0484, EM2860_BOARD_MYGICA_UTV3, TUNER_TENA_TNF_931D_DFDR1}, }; /* NOTE: introduce a separate hash table for devices with 16 bit eeproms */ int em28xx_tuner_callback(void *ptr, int component, int command, int arg) { struct em28xx_i2c_bus *i2c_bus = ptr; struct em28xx *dev = i2c_bus->dev; int rc = 0; if (dev->tuner_type != TUNER_XC2028 && dev->tuner_type != TUNER_XC5000) return 0; if (command != XC2028_TUNER_RESET && command != XC5000_TUNER_RESET) return 0; rc = em28xx_gpio_set(dev, dev->board.tuner_gpio); return rc; } EXPORT_SYMBOL_GPL(em28xx_tuner_callback); static inline void em28xx_set_xclk_i2c_speed(struct em28xx *dev) { const struct em28xx_board *board = &em28xx_boards[dev->model]; u8 xclk = board->xclk, i2c_speed = board->i2c_speed; /* * These are the default values for the majority of boards. * Use them unless the board's entry specifies otherwise. */ if (!xclk) xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_12MHZ; em28xx_write_reg(dev, EM28XX_R0F_XCLK, xclk); if (!i2c_speed) i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ; dev->i2c_speed = i2c_speed & 0x03; if (!dev->board.is_em2800) em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, i2c_speed); msleep(50); } static inline void em28xx_set_model(struct em28xx *dev) { dev->board = em28xx_boards[dev->model]; dev->has_msp34xx = dev->board.has_msp34xx; dev->is_webcam = dev->board.is_webcam; em28xx_set_xclk_i2c_speed(dev); /* Should be initialized early, for I2C to work */ dev->def_i2c_bus = dev->board.def_i2c_bus; } /* * Wait until AC97_RESET reports the expected value reliably before proceeding. * We also check that two unrelated register accesses don't return the same * value to avoid premature return. * This procedure helps ensure that AC97 register accesses are reliable. */ static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev, int expected_feat) { unsigned long timeout = jiffies + msecs_to_jiffies(2000); int feat, powerdown; while (time_is_after_jiffies(timeout)) { feat = em28xx_read_ac97(dev, AC97_RESET); if (feat < 0) return feat; powerdown = em28xx_read_ac97(dev, AC97_POWERDOWN); if (powerdown < 0) return powerdown; if (feat == expected_feat && feat != powerdown) return 0; msleep(50); } dev_warn(&dev->intf->dev, "AC97 register access is not reliable!\n"); return -ETIMEDOUT; } /* * Since em28xx_pre_card_setup() requires a proper dev->model, * this won't work for boards with generic USB IDs */ static void em28xx_pre_card_setup(struct em28xx *dev) { /* * Set the initial XCLK and I2C clock values based on the board * definition */ em28xx_set_xclk_i2c_speed(dev); /* Do device-specific pre-setup */ switch (dev->model) { case EM2861_BOARD_PLEXTOR_PX_TV100U: /* Sets the msp34xx I2S speed */ dev->i2s_speed = 2048000; break; case EM2861_BOARD_KWORLD_PVRTV_300U: case EM2880_BOARD_KWORLD_DVB_305U: em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0x6d); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0x7d); usleep_range(10000, 11000); break; case EM2870_BOARD_COMPRO_VIDEOMATE: /* * TODO: someone can do some cleanup here... 
* not everything's needed */ em28xx_write_reg(dev, EM2880_R04_GPO, 0x00); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2880_R04_GPO, 0x01); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xdc); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc); msleep(70); break; case EM2870_BOARD_TERRATEC_XS_MT2060: /* * this device needs some gpio writes to get the DVB-T * demod to work */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); break; case EM2870_BOARD_PINNACLE_PCTV_DVB: /* * this device needs some gpio writes to get the * DVB-T demod to work */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); break; case EM2820_BOARD_GADMEI_UTV310: case EM2820_BOARD_MSI_VOX_USB_2: /* enables audio for those devices */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); break; case EM2882_BOARD_KWORLD_ATSC_315U: em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2880_R04_GPO, 0x00); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2880_R04_GPO, 0x08); usleep_range(10000, 11000); break; case EM2860_BOARD_KAIOMY_TVNPC_U2: em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1); em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1); em28xx_write_regs(dev, 0x0d, "\x42", 1); em28xx_write_regs(dev, 0x08, "\xfd", 1); usleep_range(10000, 11000); em28xx_write_regs(dev, 0x08, "\xff", 1); usleep_range(10000, 11000); em28xx_write_regs(dev, 0x08, "\x7f", 1); usleep_range(10000, 11000); em28xx_write_regs(dev, 0x08, "\x6b", 1); break; case EM2860_BOARD_EASYCAP: em28xx_write_regs(dev, 0x08, "\xf8", 1); break; case EM2820_BOARD_IODATA_GVMVP_SZ: em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); msleep(70); break; case EM2860_BOARD_TERRATEC_GRABBY: /* * HACK?: Ensure AC97 register reading is reliable before * proceeding. In practice, this will wait about 1.6 seconds. */ em28xx_wait_until_ac97_features_equals(dev, 0x6a90); break; } em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); /* Unlock device */ em28xx_set_mode(dev, EM28XX_SUSPEND); } static int em28xx_hint_board(struct em28xx *dev) { int i; if (dev->is_webcam) { if (dev->em28xx_sensor == EM28XX_MT9V011) { dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; } else if (dev->em28xx_sensor == EM28XX_MT9M001 || dev->em28xx_sensor == EM28XX_MT9M111) { dev->model = EM2750_BOARD_UNKNOWN; } /* FIXME: IMPROVE ! */ return 0; } /* * HINT method: EEPROM * * This method works only for boards with eeprom. * Uses a hash of all eeprom bytes. The hash should be * unique for a vendor/tuner pair. * There is a high chance that tuners for different * video standards produce different hashes. 
*/ for (i = 0; i < ARRAY_SIZE(em28xx_eeprom_hash); i++) { if (dev->hash == em28xx_eeprom_hash[i].hash) { dev->model = em28xx_eeprom_hash[i].model; dev->tuner_type = em28xx_eeprom_hash[i].tuner; dev_err(&dev->intf->dev, "Your board has no unique USB ID.\n" "A hint was successfully made, based on the eeprom hash.\n" "This method is not 100%% foolproof.\n" "If the board was misdetected, please email this log to:\n" "\tV4L Mailing List <linux-media@vger.kernel.org>\n" "Board detected as %s\n", em28xx_boards[dev->model].name); return 0; } } /* * HINT method: I2C attached devices * * This method works for all boards. * Uses a hash of i2c scanned devices. * Devices with the same i2c attached chips will * be considered equal. * This method is less precise than the eeprom one. */ /* user did not request i2c scanning => do it now */ if (!dev->i2c_hash) em28xx_do_i2c_scan(dev, dev->def_i2c_bus); for (i = 0; i < ARRAY_SIZE(em28xx_i2c_hash); i++) { if (dev->i2c_hash == em28xx_i2c_hash[i].hash) { dev->model = em28xx_i2c_hash[i].model; dev->tuner_type = em28xx_i2c_hash[i].tuner; dev_err(&dev->intf->dev, "Your board has no unique USB ID.\n" "A hint was successfully made, based on the i2c devicelist hash.\n" "This method is not 100%% foolproof.\n" "If the board was misdetected, please email this log to:\n" "\tV4L Mailing List <linux-media@vger.kernel.org>\n" "Board detected as %s\n", em28xx_boards[dev->model].name); return 0; } } dev_err(&dev->intf->dev, "Your board has no unique USB ID and thus needs a hint to be detected.\n" "You may try to use the card=<n> insmod option to work around that.\n" "Please send an email with this log to:\n" "\tV4L Mailing List <linux-media@vger.kernel.org>\n" "Board eeprom hash is 0x%08lx\n" "Board i2c devicelist hash is 0x%08lx\n", dev->hash, dev->i2c_hash); dev_err(&dev->intf->dev, "Here is a list of valid choices for the card=<n> insmod option:\n"); for (i = 0; i < em28xx_bcount; i++) { dev_err(&dev->intf->dev, " card=%d -> %s\n", i, em28xx_boards[i].name); } return -1; } static void em28xx_card_setup(struct em28xx *dev) { int i, j, idx; bool duplicate_entry; /* * If the device can be a webcam, look for a sensor. * If no sensor is found, then it isn't a webcam. */ if (dev->is_webcam) { em28xx_detect_sensor(dev); if (dev->em28xx_sensor == EM28XX_NOSENSOR) /* NOTE: error/unknown sensor/no sensor */ dev->is_webcam = 0; } switch (dev->model) { case EM2750_BOARD_UNKNOWN: case EM2820_BOARD_UNKNOWN: case EM2800_BOARD_UNKNOWN: /* * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD. * * This occurs because they share identical USB vendor and * product IDs. * * What we do here is look up the EEPROM hash of the K-WORLD * and if it is found then we decide that we do not have * a DIGIVOX and reset the device to the K-WORLD instead. * * This solution is only valid if they do not share eeprom * hash identities which has not been determined as yet. 
*/ if (em28xx_hint_board(dev) < 0) { dev_err(&dev->intf->dev, "Board not discovered\n"); } else { em28xx_set_model(dev); em28xx_pre_card_setup(dev); } break; default: em28xx_set_model(dev); } dev_info(&dev->intf->dev, "Identified as %s (card=%d)\n", dev->board.name, dev->model); dev->tuner_type = em28xx_boards[dev->model].tuner_type; /* request some modules */ switch (dev->model) { case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2: case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850: case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C: case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB: case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595: { struct tveeprom tv; if (!dev->eedata) break; #if defined(CONFIG_MODULES) && defined(MODULE) request_module("tveeprom"); #endif /* Call first TVeeprom */ tveeprom_hauppauge_analog(&tv, dev->eedata); dev->tuner_type = tv.tuner_type; if (tv.audio_processor == TVEEPROM_AUDPROC_MSP) { dev->i2s_speed = 2048000; dev->has_msp34xx = 1; } break; } case EM2882_BOARD_KWORLD_ATSC_315U: em28xx_write_reg(dev, 0x0d, 0x42); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); usleep_range(10000, 11000); break; case EM2820_BOARD_KWORLD_PVRTV2800RF: /* GPIO enables sound on KWORLD PVR TV 2800RF */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf9); break; case EM2820_BOARD_UNKNOWN: case EM2800_BOARD_UNKNOWN: /* * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD. * * This occurs because they share identical USB vendor and * product IDs. * * What we do here is look up the EEPROM hash of the K-WORLD * and if it is found then we decide that we do not have * a DIGIVOX and reset the device to the K-WORLD instead. * * This solution is only valid if they do not share eeprom * hash identities which has not been determined as yet. */ case EM2880_BOARD_MSI_DIGIVOX_AD: if (!em28xx_hint_board(dev)) em28xx_set_model(dev); /* * In cases where we had to use a board hint, the call to * em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, * so make the call now so the analog GPIOs are set properly * before probing the i2c bus. */ em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); break; /* * The Dikom DK300 is detected as an Kworld VS-DVB-T 323UR. * * This occurs because they share identical USB vendor and * product IDs. * * What we do here is look up the EEPROM hash of the Dikom * and if it is found then we decide that we do not have * a Kworld and reset the device to the Dikom instead. * * This solution is only valid if they do not share eeprom * hash identities which has not been determined as yet. */ case EM2882_BOARD_KWORLD_VS_DVBT: if (!em28xx_hint_board(dev)) em28xx_set_model(dev); /* * In cases where we had to use a board hint, the call to * em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, * so make the call now so the analog GPIOs are set properly * before probing the i2c bus. 
*/ em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); break; } if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) { dev_err(&dev->intf->dev, "\n\n" "The support for this board wasn't validated yet.\n" "Please send a report of having this working\n" "to the V4L mailing list (and/or to other addresses)\n\n"); } /* Free eeprom data memory */ kfree(dev->eedata); dev->eedata = NULL; /* Allow overriding the tuner type via a module parameter */ if (tuner >= 0) dev->tuner_type = tuner; /* * Dynamically generate a list of valid audio inputs for this * specific board, mapping them via enum em28xx_amux. */ idx = 0; for (i = 0; i < MAX_EM28XX_INPUT; i++) { if (!INPUT(i)->type) continue; /* Skip already mapped audio inputs */ duplicate_entry = false; for (j = 0; j < idx; j++) { if (INPUT(i)->amux == dev->amux_map[j]) { duplicate_entry = true; break; } } if (duplicate_entry) continue; dev->amux_map[idx++] = INPUT(i)->amux; } for (; idx < MAX_EM28XX_INPUT; idx++) dev->amux_map[idx] = EM28XX_AMUX_UNUSED; } void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) { memset(ctl, 0, sizeof(*ctl)); ctl->fname = XC2028_DEFAULT_FIRMWARE; ctl->max_len = 64; ctl->mts = em28xx_boards[dev->model].mts_firmware; switch (dev->model) { case EM2880_BOARD_EMPIRE_DUAL_TV: case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: case EM2882_BOARD_TERRATEC_HYBRID_XS: case EM2880_BOARD_TERRATEC_HYBRID_XS: case EM2880_BOARD_TERRATEC_HYBRID_XS_FR: case EM2881_BOARD_PINNACLE_HYBRID_PRO: case EM2882_BOARD_ZOLID_HYBRID_TV_STICK: ctl->demod = XC3028_FE_ZARLINK456; break; case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E: ctl->demod = XC3028_FE_DEFAULT; break; case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600: ctl->demod = XC3028_FE_DEFAULT; ctl->fname = XC3028L_DEFAULT_FIRMWARE; break; case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850: case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: /* FIXME: Better to specify the needed IF */ ctl->demod = XC3028_FE_DEFAULT; break; case EM2883_BOARD_KWORLD_HYBRID_330U: case EM2882_BOARD_DIKOM_DK300: case EM2882_BOARD_KWORLD_VS_DVBT: ctl->demod = XC3028_FE_CHINA; ctl->fname = XC2028_DEFAULT_FIRMWARE; break; case EM2882_BOARD_EVGA_INDTUBE: ctl->demod = XC3028_FE_CHINA; ctl->fname = XC3028L_DEFAULT_FIRMWARE; break; default: ctl->demod = XC3028_FE_OREN538; } } EXPORT_SYMBOL_GPL(em28xx_setup_xc3028); static void request_module_async(struct work_struct *work) { struct em28xx *dev = container_of(work, struct em28xx, request_module_wk); /* * The em28xx extensions can be modules or builtin. If the * modules are already loaded or are built in, those extensions * can be initialised right now. Otherwise, the module init * code will do it. */ /* * Devices with an audio-only intf also have a V4L/DVB/RC * intf. Don't register extensions twice on those devices. 
*/ if (dev->is_audio_only) { #if defined(CONFIG_MODULES) && defined(MODULE) request_module("em28xx-alsa"); #endif return; } em28xx_init_extension(dev); #if defined(CONFIG_MODULES) && defined(MODULE) if (dev->has_video) request_module("em28xx-v4l"); if (dev->usb_audio_type == EM28XX_USB_AUDIO_CLASS) request_module("snd-usb-audio"); else if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR) request_module("em28xx-alsa"); if (dev->board.has_dvb) request_module("em28xx-dvb"); if (dev->board.buttons || ((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir)) request_module("em28xx-rc"); #endif /* CONFIG_MODULES */ } static void request_modules(struct em28xx *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct em28xx *dev) { flush_work(&dev->request_module_wk); } static int em28xx_media_device_init(struct em28xx *dev, struct usb_device *udev) { #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return -ENOMEM; if (udev->product) media_device_usb_init(mdev, udev, udev->product); else if (udev->manufacturer) media_device_usb_init(mdev, udev, udev->manufacturer); else media_device_usb_init(mdev, udev, dev_name(&dev->intf->dev)); dev->media_dev = mdev; #endif return 0; } static void em28xx_unregister_media_device(struct em28xx *dev) { #ifdef CONFIG_MEDIA_CONTROLLER if (dev->media_dev) { media_device_unregister(dev->media_dev); media_device_cleanup(dev->media_dev); kfree(dev->media_dev); dev->media_dev = NULL; } #endif } /* * em28xx_release_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconnected or at module unload */ static void em28xx_release_resources(struct em28xx *dev) { struct usb_device *udev = interface_to_usbdev(dev->intf); /*FIXME: I2C IR should be disconnected */ mutex_lock(&dev->lock); em28xx_unregister_media_device(dev); if (dev->def_i2c_bus) em28xx_i2c_unregister(dev, 1); em28xx_i2c_unregister(dev, 0); if (dev->ts == PRIMARY_TS) usb_put_dev(udev); /* Mark device as unused */ clear_bit(dev->devno, em28xx_devused); mutex_unlock(&dev->lock); }; /** * em28xx_free_device() - Free em28xx device * * @ref: struct kref for em28xx device * * This is called when all extensions and em28xx core unregisters a device */ void em28xx_free_device(struct kref *ref) { struct em28xx *dev = kref_to_dev(ref); dev_info(&dev->intf->dev, "Freeing device\n"); if (!dev->disconnected) em28xx_release_resources(dev); if (dev->ts == PRIMARY_TS) kfree(dev->alt_max_pkt_size_isoc); kfree(dev); } EXPORT_SYMBOL_GPL(em28xx_free_device); /* * em28xx_init_dev() * allocates and inits the device structs, registers i2c bus and v4l device */ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, struct usb_interface *intf, int minor) { int retval; const char *chip_name = NULL; dev->intf = intf; mutex_init(&dev->ctrl_urb_lock); spin_lock_init(&dev->slock); dev->em28xx_write_regs = em28xx_write_regs; dev->em28xx_read_reg = em28xx_read_reg; dev->em28xx_read_reg_req_len = em28xx_read_reg_req_len; dev->em28xx_write_regs_req = em28xx_write_regs_req; dev->em28xx_read_reg_req = em28xx_read_reg_req; dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800; em28xx_set_model(dev); dev->wait_after_write = 5; /* Based on the Chip ID, set the device configuration */ retval = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); if (retval > 0) { dev->chip_id = retval; switch (dev->chip_id) { case CHIP_ID_EM2800: chip_name = "em2800"; 
break; case CHIP_ID_EM2710: chip_name = "em2710"; break; case CHIP_ID_EM2750: chip_name = "em2750"; break; case CHIP_ID_EM2765: chip_name = "em2765"; dev->wait_after_write = 0; dev->is_em25xx = 1; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM2820: chip_name = "em2710/2820"; if (le16_to_cpu(udev->descriptor.idVendor) == 0xeb1a) { __le16 idProd = udev->descriptor.idProduct; if (le16_to_cpu(idProd) == 0x2710) chip_name = "em2710"; else if (le16_to_cpu(idProd) == 0x2820) chip_name = "em2820"; } /* NOTE: the em2820 is used in webcams, too ! */ break; case CHIP_ID_EM2840: chip_name = "em2840"; break; case CHIP_ID_EM2860: chip_name = "em2860"; break; case CHIP_ID_EM2870: chip_name = "em2870"; dev->wait_after_write = 0; break; case CHIP_ID_EM2874: chip_name = "em2874"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM28174: chip_name = "em28174"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM28178: chip_name = "em28178"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM2883: chip_name = "em2882/3"; dev->wait_after_write = 0; break; case CHIP_ID_EM2884: chip_name = "em2884"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; } } if (!chip_name) dev_info(&dev->intf->dev, "unknown em28xx chip ID (%d)\n", dev->chip_id); else dev_info(&dev->intf->dev, "chip ID is %s\n", chip_name); em28xx_media_device_init(dev, udev); if (dev->is_audio_only) { retval = em28xx_audio_setup(dev); if (retval) { retval = -ENODEV; goto err_deinit_media; } em28xx_init_extension(dev); return 0; } em28xx_pre_card_setup(dev); rt_mutex_init(&dev->i2c_bus_lock); /* register i2c bus 0 */ if (dev->board.is_em2800) retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM2800); else retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX); if (retval < 0) { dev_err(&dev->intf->dev, "%s: em28xx_i2c_register bus 0 - error [%d]!\n", __func__, retval); goto err_deinit_media; } /* register i2c bus 1 */ if (dev->def_i2c_bus) { if (dev->is_em25xx) retval = em28xx_i2c_register(dev, 1, EM28XX_I2C_ALGO_EM25XX_BUS_B); else retval = em28xx_i2c_register(dev, 1, EM28XX_I2C_ALGO_EM28XX); if (retval < 0) { dev_err(&dev->intf->dev, "%s: em28xx_i2c_register bus 1 - error [%d]!\n", __func__, retval); goto err_unreg_i2c; } } /* Do board specific init and eeprom reading */ em28xx_card_setup(dev); return 0; err_unreg_i2c: em28xx_i2c_unregister(dev, 0); err_deinit_media: em28xx_unregister_media_device(dev); return retval; } static int em28xx_duplicate_dev(struct em28xx *dev) { int nr; struct em28xx *sec_dev = kmemdup(dev, sizeof(*sec_dev), GFP_KERNEL); if (!sec_dev) { dev->dev_next = NULL; return -ENOMEM; } /* Check to see next free device and mark as used */ do { nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS); if (nr >= EM28XX_MAXBOARDS) { /* No free device slots */ dev_warn(&dev->intf->dev, ": Supports only %i em28xx boards.\n", EM28XX_MAXBOARDS); kfree(sec_dev); dev->dev_next = NULL; return -ENOMEM; } } while (test_and_set_bit(nr, em28xx_devused)); sec_dev->devno = nr; snprintf(sec_dev->name, 28, "em28xx #%d", nr); sec_dev->dev_next = NULL; dev->dev_next = sec_dev; return 0; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) static void em28xx_check_usb_descriptor(struct em28xx *dev, struct usb_device *udev, struct usb_interface *intf, int alt, int ep, bool *has_vendor_audio, bool *has_video, bool *has_dvb) { const 
struct usb_endpoint_descriptor *e; int sizedescr, size; /* * NOTE: * * Old logic with support for isoc transfers only was: * 0x82 isoc => analog * 0x83 isoc => audio * 0x84 isoc => digital * * New logic with support for bulk transfers * 0x82 isoc => analog * 0x82 bulk => analog * 0x83 isoc* => audio * 0x84 isoc => digital * 0x84 bulk => analog or digital** * 0x85 isoc => digital TS2 * 0x85 bulk => digital TS2 * (*: audio should always be isoc) * (**: analog, if ep 0x82 is isoc, otherwise digital) * * The new logic preserves backwards compatibility and * reflects the endpoint configurations we have seen * so far. But there might be devices for which this * logic is not sufficient... */ e = &intf->altsetting[alt].endpoint[ep].desc; if (!usb_endpoint_dir_in(e)) return; sizedescr = le16_to_cpu(e->wMaxPacketSize); size = sizedescr & 0x7ff; if (udev->speed == USB_SPEED_HIGH) size = size * hb_mult(sizedescr); /* Only inspect input endpoints */ switch (e->bEndpointAddress) { case 0x82: *has_video = true; if (usb_endpoint_xfer_isoc(e)) { dev->analog_ep_isoc = e->bEndpointAddress; dev->alt_max_pkt_size_isoc[alt] = size; } else if (usb_endpoint_xfer_bulk(e)) { dev->analog_ep_bulk = e->bEndpointAddress; } return; case 0x83: if (usb_endpoint_xfer_isoc(e)) *has_vendor_audio = true; else dev_err(&intf->dev, "error: skipping audio endpoint 0x83, because it uses bulk transfers !\n"); return; case 0x84: if (*has_video && (usb_endpoint_xfer_bulk(e))) { dev->analog_ep_bulk = e->bEndpointAddress; } else { if (usb_endpoint_xfer_isoc(e)) { if (size > dev->dvb_max_pkt_size_isoc) { /* * 2) some manufacturers (e.g. Terratec) * disable endpoints by setting * wMaxPacketSize to 0 bytes for all * alt settings. So far, we've seen * this for DVB isoc endpoints only. */ *has_dvb = true; dev->dvb_ep_isoc = e->bEndpointAddress; dev->dvb_max_pkt_size_isoc = size; dev->dvb_alt_isoc = alt; } } else { *has_dvb = true; dev->dvb_ep_bulk = e->bEndpointAddress; } } return; case 0x85: if (usb_endpoint_xfer_isoc(e)) { if (size > dev->dvb_max_pkt_size_isoc_ts2) { dev->dvb_ep_isoc_ts2 = e->bEndpointAddress; dev->dvb_max_pkt_size_isoc_ts2 = size; dev->dvb_alt_isoc = alt; } } else { dev->dvb_ep_bulk_ts2 = e->bEndpointAddress; } return; } } /* * em28xx_usb_probe() * checks for supported devices */ static int em28xx_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev; struct em28xx *dev = NULL; int retval; bool has_vendor_audio = false, has_video = false, has_dvb = false; int i, nr, try_bulk; const int ifnum = intf->altsetting[0].desc.bInterfaceNumber; char *speed; udev = usb_get_dev(interface_to_usbdev(intf)); /* Check to see next free device and mark as used */ do { nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS); if (nr >= EM28XX_MAXBOARDS) { /* No free device slots */ dev_err(&intf->dev, "Driver supports up to %i em28xx boards.\n", EM28XX_MAXBOARDS); retval = -ENOMEM; goto err_no_slot; } } while (test_and_set_bit(nr, em28xx_devused)); /* Don't register audio interfaces */ if (intf->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { dev_info(&intf->dev, "audio device (%04x:%04x): interface %i, class %i\n", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), ifnum, intf->altsetting[0].desc.bInterfaceClass); retval = -ENODEV; goto err; } /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto err; } /* compute alternate max packet sizes */ dev->alt_max_pkt_size_isoc = 
kcalloc(intf->num_altsetting, sizeof(dev->alt_max_pkt_size_isoc[0]), GFP_KERNEL); if (!dev->alt_max_pkt_size_isoc) { kfree(dev); retval = -ENOMEM; goto err; } /* Get endpoints */ for (i = 0; i < intf->num_altsetting; i++) { int ep; for (ep = 0; ep < intf->altsetting[i].desc.bNumEndpoints; ep++) em28xx_check_usb_descriptor(dev, udev, intf, i, ep, &has_vendor_audio, &has_video, &has_dvb); } if (!(has_vendor_audio || has_video || has_dvb)) { retval = -ENODEV; goto err_free; } switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } dev_info(&intf->dev, "New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n", udev->manufacturer ? udev->manufacturer : "", udev->product ? udev->product : "", speed, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), ifnum, intf->altsetting->desc.bInterfaceNumber); /* * Make sure we have 480 Mbps of bandwidth, otherwise things like * video stream wouldn't likely work, since 12 Mbps is generally * not enough even for most Digital TV streams. */ if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) { dev_err(&intf->dev, "Device initialization failed.\n"); dev_err(&intf->dev, "Device must be connected to a high-speed USB 2.0 port.\n"); retval = -ENODEV; goto err_free; } kref_init(&dev->ref); dev->devno = nr; dev->model = id->driver_info; dev->alt = -1; dev->is_audio_only = has_vendor_audio && !(has_video || has_dvb); dev->has_video = has_video; dev->ifnum = ifnum; dev->ts = PRIMARY_TS; snprintf(dev->name, 28, "em28xx"); dev->dev_next = NULL; if (has_vendor_audio) { dev_info(&intf->dev, "Audio interface %i found (Vendor Class)\n", ifnum); dev->usb_audio_type = EM28XX_USB_AUDIO_VENDOR; } /* Checks if audio is provided by a USB Audio Class intf */ for (i = 0; i < udev->config->desc.bNumInterfaces; i++) { struct usb_interface *uif = udev->config->interface[i]; if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { if (has_vendor_audio) dev_err(&intf->dev, "em28xx: device seems to have vendor AND usb audio class interfaces !\n" "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n"); dev->usb_audio_type = EM28XX_USB_AUDIO_CLASS; break; } } if (has_video) dev_info(&intf->dev, "Video interface %i found:%s%s\n", ifnum, dev->analog_ep_bulk ? " bulk" : "", dev->analog_ep_isoc ? " isoc" : ""); if (has_dvb) dev_info(&intf->dev, "DVB interface %i found:%s%s\n", ifnum, dev->dvb_ep_bulk ? " bulk" : "", dev->dvb_ep_isoc ? 
" isoc" : ""); dev->num_alt = intf->num_altsetting; if ((unsigned int)card[nr] < em28xx_bcount) dev->model = card[nr]; /* save our data pointer in this intf device */ usb_set_intfdata(intf, dev); /* allocate device struct and check if the device is a webcam */ mutex_init(&dev->lock); retval = em28xx_init_dev(dev, udev, intf, nr); if (retval) goto err_free; if (usb_xfer_mode < 0) { if (dev->is_webcam) try_bulk = 1; else try_bulk = 0; } else { try_bulk = usb_xfer_mode > 0; } /* Disable V4L2 if the device doesn't have a decoder or image sensor */ if (has_video && dev->board.decoder == EM28XX_NODECODER && dev->em28xx_sensor == EM28XX_NOSENSOR) { dev_err(&intf->dev, "Currently, V4L2 is not supported on this model\n"); has_video = false; dev->has_video = false; } if (dev->board.has_dual_ts && (dev->tuner_type != TUNER_ABSENT || INPUT(0)->type)) { /* * The logic with sets alternate is not ready for dual-tuners * which analog modes. */ dev_err(&intf->dev, "We currently don't support analog TV or stream capture on dual tuners.\n"); has_video = false; } /* Select USB transfer types to use */ if (has_video) { if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk)) dev->analog_xfer_bulk = 1; dev_info(&intf->dev, "analog set to %s mode.\n", dev->analog_xfer_bulk ? "bulk" : "isoc"); } if (has_dvb) { if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk)) dev->dvb_xfer_bulk = 1; dev_info(&intf->dev, "dvb set to %s mode.\n", dev->dvb_xfer_bulk ? "bulk" : "isoc"); } if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) { kref_init(&dev->dev_next->ref); dev->dev_next->ts = SECONDARY_TS; dev->dev_next->alt = -1; dev->dev_next->is_audio_only = has_vendor_audio && !(has_video || has_dvb); dev->dev_next->has_video = false; dev->dev_next->ifnum = ifnum; dev->dev_next->model = id->driver_info; mutex_init(&dev->dev_next->lock); retval = em28xx_init_dev(dev->dev_next, udev, intf, dev->dev_next->devno); if (retval) goto err_free; dev->dev_next->board.ir_codes = NULL; /* No IR for 2nd tuner */ dev->dev_next->board.has_ir_i2c = 0; /* No IR for 2nd tuner */ if (usb_xfer_mode < 0) { if (dev->dev_next->is_webcam) try_bulk = 1; else try_bulk = 0; } else { try_bulk = usb_xfer_mode > 0; } /* Select USB transfer types to use */ if (has_dvb) { if (!dev->dvb_ep_isoc_ts2 || (try_bulk && dev->dvb_ep_bulk_ts2)) dev->dev_next->dvb_xfer_bulk = 1; dev_info(&dev->intf->dev, "dvb ts2 set to %s mode.\n", dev->dev_next->dvb_xfer_bulk ? "bulk" : "isoc"); } dev->dev_next->dvb_ep_isoc = dev->dvb_ep_isoc_ts2; dev->dev_next->dvb_ep_bulk = dev->dvb_ep_bulk_ts2; dev->dev_next->dvb_max_pkt_size_isoc = dev->dvb_max_pkt_size_isoc_ts2; dev->dev_next->dvb_alt_isoc = dev->dvb_alt_isoc; /* Configure hardware to support TS2*/ if (dev->dvb_xfer_bulk) { /* The ep4 and ep5 are configured for BULK */ em28xx_write_reg(dev, 0x0b, 0x96); mdelay(100); em28xx_write_reg(dev, 0x0b, 0x80); mdelay(100); } else { /* The ep4 and ep5 are configured for ISO */ em28xx_write_reg(dev, 0x0b, 0x96); mdelay(100); em28xx_write_reg(dev, 0x0b, 0x82); mdelay(100); } } request_modules(dev); /* * Do it at the end, to reduce dynamic configuration changes during * the device init. Yet, as request_modules() can be async, the * topology will likely change after the load of the em28xx subdrivers. */ #ifdef CONFIG_MEDIA_CONTROLLER /* * No need to check the return value, the device will still be * usable without media controller API. 
*/ retval = media_device_register(dev->media_dev); #endif return 0; err_free: kfree(dev->alt_max_pkt_size_isoc); kfree(dev); err: clear_bit(nr, em28xx_devused); err_no_slot: usb_put_dev(udev); return retval; } /* * em28xx_usb_disconnect() * called when the device gets disconnected * video device will be unregistered on v4l2_close in case it is still open */ static void em28xx_usb_disconnect(struct usb_interface *intf) { struct em28xx *dev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; if (dev->dev_next) { dev->dev_next->disconnected = 1; dev_info(&dev->intf->dev, "Disconnecting %s\n", dev->dev_next->name); } dev->disconnected = 1; dev_info(&dev->intf->dev, "Disconnecting %s\n", dev->name); flush_request_modules(dev); em28xx_close_extension(dev); if (dev->dev_next) em28xx_release_resources(dev->dev_next); em28xx_release_resources(dev); if (dev->dev_next) { kref_put(&dev->dev_next->ref, em28xx_free_device); dev->dev_next = NULL; } kref_put(&dev->ref, em28xx_free_device); } static int em28xx_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct em28xx *dev; dev = usb_get_intfdata(intf); if (!dev) return 0; em28xx_suspend_extension(dev); return 0; } static int em28xx_usb_resume(struct usb_interface *intf) { struct em28xx *dev; dev = usb_get_intfdata(intf); if (!dev) return 0; em28xx_resume_extension(dev); return 0; } static struct usb_driver em28xx_usb_driver = { .name = "em28xx", .probe = em28xx_usb_probe, .disconnect = em28xx_usb_disconnect, .suspend = em28xx_usb_suspend, .resume = em28xx_usb_resume, .reset_resume = em28xx_usb_resume, .id_table = em28xx_id_table, }; module_usb_driver(em28xx_usb_driver); |
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/fileattr.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/namei.h>

#include "internal.h"

/**
 * fileattr_fill_xflags - initialize fileattr with xflags
 * @fa: fileattr pointer
 * @xflags: FS_XFLAG_* flags
 *
 * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
 * other fields are zeroed.
 */
void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags)
{
	memset(fa, 0, sizeof(*fa));
	fa->fsx_valid = true;
	fa->fsx_xflags = xflags;
	if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE)
		fa->flags |= FS_IMMUTABLE_FL;
	if (fa->fsx_xflags & FS_XFLAG_APPEND)
		fa->flags |= FS_APPEND_FL;
	if (fa->fsx_xflags & FS_XFLAG_SYNC)
		fa->flags |= FS_SYNC_FL;
	if (fa->fsx_xflags & FS_XFLAG_NOATIME)
		fa->flags |= FS_NOATIME_FL;
	if (fa->fsx_xflags & FS_XFLAG_NODUMP)
		fa->flags |= FS_NODUMP_FL;
	if (fa->fsx_xflags & FS_XFLAG_DAX)
		fa->flags |= FS_DAX_FL;
	if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
		fa->flags |= FS_PROJINHERIT_FL;
}
EXPORT_SYMBOL(fileattr_fill_xflags);

/**
 * fileattr_fill_flags - initialize fileattr with flags
 * @fa: fileattr pointer
 * @flags: FS_*_FL flags
 *
 * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
 * All other fields are zeroed.
*/ void fileattr_fill_flags(struct file_kattr *fa, u32 flags) { memset(fa, 0, sizeof(*fa)); fa->flags_valid = true; fa->flags = flags; if (fa->flags & FS_SYNC_FL) fa->fsx_xflags |= FS_XFLAG_SYNC; if (fa->flags & FS_IMMUTABLE_FL) fa->fsx_xflags |= FS_XFLAG_IMMUTABLE; if (fa->flags & FS_APPEND_FL) fa->fsx_xflags |= FS_XFLAG_APPEND; if (fa->flags & FS_NODUMP_FL) fa->fsx_xflags |= FS_XFLAG_NODUMP; if (fa->flags & FS_NOATIME_FL) fa->fsx_xflags |= FS_XFLAG_NOATIME; if (fa->flags & FS_DAX_FL) fa->fsx_xflags |= FS_XFLAG_DAX; if (fa->flags & FS_PROJINHERIT_FL) fa->fsx_xflags |= FS_XFLAG_PROJINHERIT; } EXPORT_SYMBOL(fileattr_fill_flags); /** * vfs_fileattr_get - retrieve miscellaneous file attributes * @dentry: the object to retrieve from * @fa: fileattr pointer * * Call i_op->fileattr_get() callback, if exists. * * Return: 0 on success, or a negative error on failure. */ int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); int error; if (!inode->i_op->fileattr_get) return -ENOIOCTLCMD; error = security_inode_file_getattr(dentry, fa); if (error) return error; return inode->i_op->fileattr_get(dentry, fa); } EXPORT_SYMBOL(vfs_fileattr_get); static void fileattr_to_file_attr(const struct file_kattr *fa, struct file_attr *fattr) { __u32 mask = FS_XFLAGS_MASK; memset(fattr, 0, sizeof(struct file_attr)); fattr->fa_xflags = fa->fsx_xflags & mask; fattr->fa_extsize = fa->fsx_extsize; fattr->fa_nextents = fa->fsx_nextents; fattr->fa_projid = fa->fsx_projid; fattr->fa_cowextsize = fa->fsx_cowextsize; } /** * copy_fsxattr_to_user - copy fsxattr to userspace. * @fa: fileattr pointer * @ufa: fsxattr user pointer * * Return: 0 on success, or -EFAULT on failure. */ int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa) { struct fsxattr xfa; __u32 mask = FS_XFLAGS_MASK; memset(&xfa, 0, sizeof(xfa)); xfa.fsx_xflags = fa->fsx_xflags & mask; xfa.fsx_extsize = fa->fsx_extsize; xfa.fsx_nextents = fa->fsx_nextents; xfa.fsx_projid = fa->fsx_projid; xfa.fsx_cowextsize = fa->fsx_cowextsize; if (copy_to_user(ufa, &xfa, sizeof(xfa))) return -EFAULT; return 0; } EXPORT_SYMBOL(copy_fsxattr_to_user); static int file_attr_to_fileattr(const struct file_attr *fattr, struct file_kattr *fa) { __u64 mask = FS_XFLAGS_MASK; if (fattr->fa_xflags & ~mask) return -EINVAL; fileattr_fill_xflags(fa, fattr->fa_xflags); fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK; fa->fsx_extsize = fattr->fa_extsize; fa->fsx_projid = fattr->fa_projid; fa->fsx_cowextsize = fattr->fa_cowextsize; return 0; } static int copy_fsxattr_from_user(struct file_kattr *fa, struct fsxattr __user *ufa) { struct fsxattr xfa; __u32 mask = FS_XFLAGS_MASK; if (copy_from_user(&xfa, ufa, sizeof(xfa))) return -EFAULT; if (xfa.fsx_xflags & ~mask) return -EOPNOTSUPP; fileattr_fill_xflags(fa, xfa.fsx_xflags); fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK; fa->fsx_extsize = xfa.fsx_extsize; fa->fsx_nextents = xfa.fsx_nextents; fa->fsx_projid = xfa.fsx_projid; fa->fsx_cowextsize = xfa.fsx_cowextsize; return 0; } /* * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject * any invalid configurations. * * Note: must be called with inode lock held. */ static int fileattr_set_prepare(struct inode *inode, const struct file_kattr *old_ma, struct file_kattr *fa) { int err; /* * The IMMUTABLE and APPEND_ONLY flags can only be changed by * the relevant capability. 
*/ if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) && !capable(CAP_LINUX_IMMUTABLE)) return -EPERM; err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags); if (err) return err; /* * Project Quota ID state is only allowed to change from within the init * namespace. Enforce that restriction only if we are trying to change * the quota ID state. Everything else is allowed in user namespaces. */ if (current_user_ns() != &init_user_ns) { if (old_ma->fsx_projid != fa->fsx_projid) return -EINVAL; if ((old_ma->fsx_xflags ^ fa->fsx_xflags) & FS_XFLAG_PROJINHERIT) return -EINVAL; } else { /* * Caller is allowed to change the project ID. If it is being * changed, make sure that the new value is valid. */ if (old_ma->fsx_projid != fa->fsx_projid && !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid))) return -EINVAL; } /* Check extent size hints. */ if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode)) return -EINVAL; if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && !S_ISDIR(inode->i_mode)) return -EINVAL; if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) && !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) return -EINVAL; /* * It is only valid to set the DAX flag on regular files and * directories on filesystems. */ if ((fa->fsx_xflags & FS_XFLAG_DAX) && !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) return -EINVAL; /* Extent size hints of zero turn off the flags. */ if (fa->fsx_extsize == 0) fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); if (fa->fsx_cowextsize == 0) fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; return 0; } /** * vfs_fileattr_set - change miscellaneous file attributes * @idmap: idmap of the mount * @dentry: the object to change * @fa: fileattr pointer * * After verifying permissions, call i_op->fileattr_set() callback, if * exists. * * Verifying attributes involves retrieving current attributes with * i_op->fileattr_get(), this also allows initializing attributes that have * not been set by the caller to current values. Inode lock is held * thoughout to prevent racing with another instance. * * Return: 0 on success, or a negative error on failure. 
*/ int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct file_kattr old_ma = {}; int err; if (!inode->i_op->fileattr_set) return -ENOIOCTLCMD; if (!inode_owner_or_capable(idmap, inode)) return -EPERM; inode_lock(inode); err = vfs_fileattr_get(dentry, &old_ma); if (!err) { /* initialize missing bits from old_ma */ if (fa->flags_valid) { fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON; fa->fsx_extsize = old_ma.fsx_extsize; fa->fsx_nextents = old_ma.fsx_nextents; fa->fsx_projid = old_ma.fsx_projid; fa->fsx_cowextsize = old_ma.fsx_cowextsize; } else { fa->flags |= old_ma.flags & ~FS_COMMON_FL; } err = fileattr_set_prepare(inode, &old_ma, fa); if (err) goto out; err = security_inode_file_setattr(dentry, fa); if (err) goto out; err = inode->i_op->fileattr_set(idmap, dentry, fa); if (err) goto out; fsnotify_xattr(dentry); } out: inode_unlock(inode); return err; } EXPORT_SYMBOL(vfs_fileattr_set); int ioctl_getflags(struct file *file, unsigned int __user *argp) { struct file_kattr fa = { .flags_valid = true }; /* hint only */ int err; err = vfs_fileattr_get(file->f_path.dentry, &fa); if (!err) err = put_user(fa.flags, argp); return err; } int ioctl_setflags(struct file *file, unsigned int __user *argp) { struct mnt_idmap *idmap = file_mnt_idmap(file); struct dentry *dentry = file->f_path.dentry; struct file_kattr fa; unsigned int flags; int err; err = get_user(flags, argp); if (!err) { err = mnt_want_write_file(file); if (!err) { fileattr_fill_flags(&fa, flags); err = vfs_fileattr_set(idmap, dentry, &fa); mnt_drop_write_file(file); } } return err; } int ioctl_fsgetxattr(struct file *file, void __user *argp) { struct file_kattr fa = { .fsx_valid = true }; /* hint only */ int err; err = vfs_fileattr_get(file->f_path.dentry, &fa); if (!err) err = copy_fsxattr_to_user(&fa, argp); return err; } int ioctl_fssetxattr(struct file *file, void __user *argp) { struct mnt_idmap *idmap = file_mnt_idmap(file); struct dentry *dentry = file->f_path.dentry; struct file_kattr fa; int err; err = copy_fsxattr_from_user(&fa, argp); if (!err) { err = mnt_want_write_file(file); if (!err) { err = vfs_fileattr_set(idmap, dentry, &fa); mnt_drop_write_file(file); } } return err; } SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename, struct file_attr __user *, ufattr, size_t, usize, unsigned int, at_flags) { struct path filepath __free(path_put) = {}; struct filename *name __free(putname) = NULL; unsigned int lookup_flags = 0; struct file_attr fattr; struct file_kattr fa; int error; BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0); BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST); if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; if (!(at_flags & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (usize > PAGE_SIZE) return -E2BIG; if (usize < FILE_ATTR_SIZE_VER0) return -EINVAL; name = getname_maybe_null(filename, at_flags); if (IS_ERR(name)) return PTR_ERR(name); if (!name && dfd >= 0) { CLASS(fd, f)(dfd); if (fd_empty(f)) return -EBADF; filepath = fd_file(f)->f_path; path_get(&filepath); } else { error = filename_lookup(dfd, name, lookup_flags, &filepath, NULL); if (error) return error; } error = vfs_fileattr_get(filepath.dentry, &fa); if (error == -ENOIOCTLCMD || error == -ENOTTY) error = -EOPNOTSUPP; if (error) return error; fileattr_to_file_attr(&fa, &fattr); error = copy_struct_to_user(ufattr, usize, &fattr, sizeof(struct file_attr), NULL); return 
error;
}

SYSCALL_DEFINE5(file_setattr, int, dfd, const char __user *, filename,
		struct file_attr __user *, ufattr, size_t, usize,
		unsigned int, at_flags)
{
	struct path filepath __free(path_put) = {};
	struct filename *name __free(putname) = NULL;
	unsigned int lookup_flags = 0;
	struct file_attr fattr;
	struct file_kattr fa;
	int error;

	BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
	BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);

	if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;

	if (!(at_flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	if (usize > PAGE_SIZE)
		return -E2BIG;

	if (usize < FILE_ATTR_SIZE_VER0)
		return -EINVAL;

	error = copy_struct_from_user(&fattr, sizeof(struct file_attr), ufattr,
				      usize);
	if (error)
		return error;

	error = file_attr_to_fileattr(&fattr, &fa);
	if (error)
		return error;

	name = getname_maybe_null(filename, at_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	if (!name && dfd >= 0) {
		CLASS(fd, f)(dfd);

		if (fd_empty(f))
			return -EBADF;

		filepath = fd_file(f)->f_path;
		path_get(&filepath);
	} else {
		error = filename_lookup(dfd, name, lookup_flags, &filepath,
					NULL);
		if (error)
			return error;
	}

	error = mnt_want_write(filepath.mnt);
	if (!error) {
		error = vfs_fileattr_set(mnt_idmap(filepath.mnt),
					 filepath.dentry, &fa);
		if (error == -ENOIOCTLCMD || error == -ENOTTY)
			error = -EOPNOTSUPP;
		mnt_drop_write(filepath.mnt);
	}

	return error;
}
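The two syscall entry points above take (dfd, filename, ufattr, usize, at_flags), mirroring other *at() interfaces. Below is a minimal user-space sketch of driving them; it is illustrative and not part of the kernel sources, and it assumes headers new enough to define __NR_file_getattr/__NR_file_setattr and struct file_attr (whose fa_xflags/fa_projid/fa_extsize fields follow fileattr_to_file_attr() above).

/* Illustrative user-space sketch, assuming recent uapi headers. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
#if defined(__NR_file_getattr) && defined(__NR_file_setattr)
	struct file_attr fattr = { 0 };
	const char *path = argc > 1 ? argv[1] : ".";

	/* AT_FDCWD + path lookup, following symlinks (no AT_SYMLINK_NOFOLLOW). */
	if (syscall(__NR_file_getattr, AT_FDCWD, path, &fattr,
		    sizeof(fattr), 0) != 0) {
		perror("file_getattr");
		return 1;
	}
	printf("xflags=%#x projid=%u extsize=%u\n",
	       fattr.fa_xflags, fattr.fa_projid, fattr.fa_extsize);

	/* Toggle FS_XFLAG_NODUMP and write the attributes back. */
	fattr.fa_xflags ^= FS_XFLAG_NODUMP;
	if (syscall(__NR_file_setattr, AT_FDCWD, path, &fattr,
		    sizeof(fattr), 0) != 0)
		perror("file_setattr");
#else
	(void)argc; (void)argv;
	fprintf(stderr, "file_getattr/file_setattr not available in these headers\n");
#endif
	return 0;
}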
// SPDX-License-Identifier: GPL-2.0
/* Bluetooth HCI driver model support. */

#include <linux/module.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static const struct class bt_class = {
	.name = "bluetooth",
};

static void bt_link_release(struct device *dev)
{
	struct hci_conn *conn = to_hci_conn(dev);

	kfree(conn);
}

static const struct device_type bt_link = {
	.name    = "link",
	.release = bt_link_release,
};

void hci_conn_init_sysfs(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	conn->dev.type = &bt_link;
	conn->dev.class = &bt_class;
	conn->dev.parent = &hdev->dev;

	device_initialize(&conn->dev);
}

void hci_conn_add_sysfs(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (device_is_registered(&conn->dev))
		return;

	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

	if (device_add(&conn->dev) < 0)
		bt_dev_err(hdev, "failed to register connection device");
}

void hci_conn_del_sysfs(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!device_is_registered(&conn->dev)) {
		/* If device_add() has *not* succeeded, use *only* put_device()
		 * to drop the reference count.
		 */
		put_device(&conn->dev);
		return;
	}

	/* If there are devices using the connection as parent reset it to NULL
	 * before unregistering the device.
	 */
	while (1) {
		struct device *dev;

		dev = device_find_any_child(&conn->dev);
		if (!dev)
			break;
		device_move(dev, NULL, DPM_ORDER_DEV_LAST);
		put_device(dev);
	}

	device_unregister(&conn->dev);
}

static void bt_host_release(struct device *dev)
{
	struct hci_dev *hdev = to_hci_dev(dev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		hci_release_dev(hdev);
	else
		kfree(hdev);
	module_put(THIS_MODULE);
}

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct hci_dev *hdev = to_hci_dev(dev);

	if (hdev->reset)
		hdev->reset(hdev);

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *bt_host_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bt_host);

static const struct device_type bt_host = {
	.name    = "host",
	.release = bt_host_release,
	.groups  = bt_host_groups,
};

void hci_init_sysfs(struct hci_dev *hdev)
{
	struct device *dev = &hdev->dev;

	dev->type = &bt_host;
	dev->class = &bt_class;

	__module_get(THIS_MODULE);
	device_initialize(dev);
}

int __init bt_sysfs_init(void)
{
	return class_register(&bt_class);
}

void bt_sysfs_cleanup(void)
{
	class_unregister(&bt_class);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H

#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/scm.h>
#include <uapi/linux/netlink.h>

struct net;

void do_trace_netlink_extack(const char *msg);

static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
	return (struct nlmsghdr *)skb->data;
}

enum netlink_skb_flags {
	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
};

struct netlink_skb_parms {
	struct scm_creds	creds;	/* Skb credentials	*/
	__u32			portid;
	__u32			dst_group;
	__u32			flags;
	struct sock		*sk;
	bool			nsid_is_set;
	int			nsid;
};

#define NETLINK_CB(skb)		(*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb)	(&NETLINK_CB((skb)).creds)

#define NETLINK_CTX_SIZE	48

void netlink_table_grab(void);
void netlink_table_ungrab(void);

#define NL_CFG_F_NONROOT_RECV	(1 << 0)
#define NL_CFG_F_NONROOT_SEND	(1 << 1)

/* optional Netlink kernel configuration parameters */
struct netlink_kernel_cfg {
	unsigned int	groups;
	unsigned int	flags;
	void		(*input)(struct sk_buff *skb);
	int		(*bind)(struct net *net, int group);
	void		(*unbind)(struct net *net, int group);
	void		(*release)(struct sock *sk, unsigned long *groups);
};

struct sock *__netlink_kernel_create(struct net *net, int unit,
				     struct module *module,
				     struct netlink_kernel_cfg *cfg);
static inline struct sock *
netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
{
	return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
}

/* this can be increased when necessary - don't expose to userland */
#define NETLINK_MAX_COOKIE_LEN	8
#define NETLINK_MAX_FMTMSG_LEN	80

/**
 * struct netlink_ext_ack - netlink extended ACK report struct
 * @_msg: message string to report - don't access directly, use
 *	%NL_SET_ERR_MSG
 * @bad_attr: attribute with error
 * @policy: policy for a bad attribute
 * @miss_type: attribute type which was missing
 * @miss_nest: nest missing an attribute (%NULL if missing top level attr)
 * @cookie: cookie data to return to userspace (for success)
 * @cookie_len: actual cookie data length
 * @_msg_buf: output buffer for formatted message strings - don't access
 *	directly, use
%NL_SET_ERR_MSG_FMT */ struct netlink_ext_ack { const char *_msg; const struct nlattr *bad_attr; const struct nla_policy *policy; const struct nlattr *miss_nest; u16 miss_type; u8 cookie[NETLINK_MAX_COOKIE_LEN]; u8 cookie_len; char _msg_buf[NETLINK_MAX_FMTMSG_LEN]; }; /* Always use this macro, this allows later putting the * message into a separate section or such for things * like translation or listing all possible messages. * If string formatting is needed use NL_SET_ERR_MSG_FMT. */ #define NL_SET_ERR_MSG(extack, msg) do { \ static const char __msg[] = msg; \ struct netlink_ext_ack *__extack = (extack); \ \ do_trace_netlink_extack(__msg); \ \ if (__extack) \ __extack->_msg = __msg; \ } while (0) /* We splice fmt with %s at each end even in the snprintf so that both calls * can use the same string constant, avoiding its duplication in .ro */ #define NL_SET_ERR_MSG_FMT(extack, fmt, args...) do { \ struct netlink_ext_ack *__extack = (extack); \ \ if (!__extack) \ break; \ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \ "%s" fmt "%s", "", ##args, "") >= \ NETLINK_MAX_FMTMSG_LEN) \ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \ ##args, "\n"); \ \ do_trace_netlink_extack(__extack->_msg_buf); \ \ __extack->_msg = __extack->_msg_buf; \ } while (0) #define NL_SET_ERR_MSG_MOD(extack, msg) \ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) #define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \ NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args) #define NL_SET_ERR_MSG_WEAK(extack, msg) do { \ if ((extack) && !(extack)->_msg) \ NL_SET_ERR_MSG((extack), msg); \ } while (0) #define NL_SET_ERR_MSG_WEAK_MOD(extack, msg) do { \ if ((extack) && !(extack)->_msg) \ NL_SET_ERR_MSG_MOD((extack), msg); \ } while (0) #define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \ if ((extack)) { \ (extack)->bad_attr = (attr); \ (extack)->policy = (pol); \ } \ } while (0) #define NL_SET_BAD_ATTR(extack, attr) NL_SET_BAD_ATTR_POLICY(extack, attr, NULL) #define NL_SET_ERR_MSG_ATTR_POL(extack, attr, pol, msg) do { \ static const char __msg[] = msg; \ struct netlink_ext_ack *__extack = (extack); \ \ do_trace_netlink_extack(__msg); \ \ if (__extack) { \ __extack->_msg = __msg; \ __extack->bad_attr = (attr); \ __extack->policy = (pol); \ } \ } while (0) #define NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, pol, fmt, args...) do { \ struct netlink_ext_ack *__extack = (extack); \ \ if (!__extack) \ break; \ \ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \ "%s" fmt "%s", "", ##args, "") >= \ NETLINK_MAX_FMTMSG_LEN) \ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \ ##args, "\n"); \ \ do_trace_netlink_extack(__extack->_msg_buf); \ \ __extack->_msg = __extack->_msg_buf; \ __extack->bad_attr = (attr); \ __extack->policy = (pol); \ } while (0) #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \ NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg) #define NL_SET_ERR_MSG_ATTR_FMT(extack, attr, msg, args...) 
\ NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, NULL, msg, ##args) #define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \ struct netlink_ext_ack *__extack = (extack); \ \ if (__extack) { \ __extack->miss_nest = (nest); \ __extack->miss_type = (type); \ } \ } while (0) #define NL_REQ_ATTR_CHECK(extack, nest, tb, type) ({ \ struct nlattr **__tb = (tb); \ u32 __attr = (type); \ int __retval; \ \ __retval = !__tb[__attr]; \ if (__retval) \ NL_SET_ERR_ATTR_MISS((extack), (nest), __attr); \ __retval; \ }) static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, u64 cookie) { if (!extack) return; BUILD_BUG_ON(sizeof(extack->cookie) < sizeof(cookie)); memcpy(extack->cookie, &cookie, sizeof(cookie)); extack->cookie_len = sizeof(cookie); } void netlink_kernel_release(struct sock *sk); int __netlink_change_ngroups(struct sock *sk, unsigned int groups); int netlink_change_ngroups(struct sock *sk, unsigned int groups); void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, const struct netlink_ext_ack *extack); int netlink_has_listeners(struct sock *sk, unsigned int group); bool netlink_strict_get_check(struct sk_buff *skb); int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data); int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation, netlink_filter_fn filter, void *filter_data); int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); int netlink_register_notifier(struct notifier_block *nb); int netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ struct sock *netlink_getsockbyfd(int fd); int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk); void netlink_detachskb(struct sock *sk, struct sk_buff *skb); int netlink_sendskb(struct sock *sk, struct sk_buff *skb); static inline struct sk_buff * netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff *nskb; nskb = skb_clone(skb, gfp_mask); if (!nskb) return NULL; /* This is a large skb, set destructor callback to release head */ if (is_vmalloc_addr(skb->head)) nskb->destructor = skb->destructor; return nskb; } /* * skb should fit one page. This choice is good for headerless malloc. * But we should limit to 8K so that userspace does not have to * use enormous buffer sizes on recvmsg() calls just to avoid * MSG_TRUNC when PAGE_SIZE is very large. */ #if PAGE_SIZE < 8192UL #define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE) #else #define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL) #endif #define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); void *data; /* the module that dump function belong to */ struct module *module; struct netlink_ext_ack *extack; u16 family; u16 answer_flags; u32 min_dump_alloc; unsigned int prev_seq, seq; int flags; bool strict_check; union { u8 ctx[NETLINK_CTX_SIZE]; /* args is deprecated. Cast a struct over ctx instead * for proper type safety. 
 */
		long args[6];
	};
};

#define NL_ASSERT_CTX_FITS(type_name)					\
	BUILD_BUG_ON(sizeof(type_name) >				\
		     sizeof_field(struct netlink_callback, ctx))

struct netlink_notify {
	struct net *net;
	u32 portid;
	int protocol;
};

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);

struct netlink_dump_control {
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *skb, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	struct netlink_ext_ack *extack;
	void *data;
	struct module *module;
	u32 min_dump_alloc;
	int flags;
};

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				     const struct nlmsghdr *nlh,
				     struct netlink_dump_control *control)
{
	if (!control->module)
		control->module = THIS_MODULE;

	return __netlink_dump_start(ssk, skb, nlh, control);
}

struct netlink_tap {
	struct net_device *dev;
	struct module *module;
	struct list_head list;
};

int netlink_add_tap(struct netlink_tap *nt);
int netlink_remove_tap(struct netlink_tap *nt);

bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *ns, int cap);
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);

struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast);

#endif	/* __LINUX_NETLINK_H */
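For context, here is a minimal kernel-side sketch of how the extended-ACK helpers declared in this header are typically used from a netlink request handler. The EXAMPLE_ATTR_* attribute IDs and the function itself are hypothetical; only NL_REQ_ATTR_CHECK(), NL_SET_ERR_MSG() and NL_SET_ERR_MSG_FMT() from this header (plus nla_get_u32() from <net/netlink.h>) are real interfaces.

/* Illustrative sketch only, not part of the header above. */
#include <net/netlink.h>	/* pulls in linux/netlink.h and the nla_* helpers */

/* Hypothetical attribute set, for illustration only. */
enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_IFINDEX,
	EXAMPLE_ATTR_MTU,
	__EXAMPLE_ATTR_MAX,
};

static int example_validate_request(struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	/* Missing mandatory attribute: records miss_type (top-level nest). */
	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, EXAMPLE_ATTR_IFINDEX))
		return -EINVAL;

	/* Static message: only a pointer to the constant string is stored. */
	if (nla_get_u32(tb[EXAMPLE_ATTR_IFINDEX]) == 0) {
		NL_SET_ERR_MSG(extack, "ifindex must be non-zero");
		return -EINVAL;
	}

	/* Formatted message: rendered into _msg_buf, truncated at
	 * NETLINK_MAX_FMTMSG_LEN (80) bytes.
	 */
	if (tb[EXAMPLE_ATTR_MTU] && nla_get_u32(tb[EXAMPLE_ATTR_MTU]) > 9000) {
		NL_SET_ERR_MSG_FMT(extack, "MTU %u exceeds supported maximum",
				   nla_get_u32(tb[EXAMPLE_ATTR_MTU]));
		return -ERANGE;
	}

	return 0;
}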
// SPDX-License-Identifier: GPL-2.0-or-later /* * Security plug functions * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2016 Mellanox Technologies * Copyright (C) 2023 Microsoft Corporation <paul@paul-moore.com> */ #define pr_fmt(fmt) "LSM: " fmt #include <linux/bpf.h> #include <linux/capability.h> #include <linux/dcache.h> #include <linux/export.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kernel_read_file.h> #include <linux/lsm_hooks.h> #include <linux/mman.h> #include <linux/mount.h> #include <linux/personality.h> #include <linux/backing-dev.h> #include <linux/string.h> #include <linux/xattr.h> #include <linux/msg.h> #include <linux/overflow.h> #include <linux/perf_event.h> #include <linux/fs.h> #include <net/flow.h> #include <net/sock.h> #include "lsm.h" /* * These are descriptions of the reasons that can be passed to the * security_locked_down() LSM hook. Placing this array here allows * all security modules to use the same descriptions for auditing * purposes.
*/ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = { [LOCKDOWN_NONE] = "none", [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading", [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port", [LOCKDOWN_EFI_TEST] = "/dev/efi_test access", [LOCKDOWN_KEXEC] = "kexec of unsigned images", [LOCKDOWN_HIBERNATION] = "hibernation", [LOCKDOWN_PCI_ACCESS] = "direct PCI access", [LOCKDOWN_IOPORT] = "raw io port access", [LOCKDOWN_MSR] = "raw MSR access", [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables", [LOCKDOWN_DEVICE_TREE] = "modifying device tree contents", [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage", [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO", [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters", [LOCKDOWN_MMIOTRACE] = "unsafe mmio", [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", [LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM", [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", [LOCKDOWN_XFRM_SECRET] = "xfrm SA secret", [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality", }; bool lsm_debug __ro_after_init; unsigned int lsm_active_cnt __ro_after_init; const struct lsm_id *lsm_idlist[MAX_LSM_COUNT]; struct lsm_blob_sizes blob_sizes; struct kmem_cache *lsm_file_cache; struct kmem_cache *lsm_inode_cache; #define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX /* * Identifier for the LSM static calls. * HOOK is an LSM hook as defined in linux/lsm_hookdefs.h * IDX is the index of the static call. 0 <= NUM < MAX_LSM_COUNT */ #define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX /* * Call the macro M for each LSM hook MAX_LSM_COUNT times. */ #define LSM_LOOP_UNROLL(M, ...) \ do { \ UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \ } while (0) #define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) #ifdef CONFIG_HAVE_STATIC_CALL #define LSM_HOOK_TRAMP(NAME, NUM) \ &STATIC_CALL_TRAMP(LSM_STATIC_CALL(NAME, NUM)) #else #define LSM_HOOK_TRAMP(NAME, NUM) NULL #endif /* * Define static calls and static keys for each LSM hook. */ #define DEFINE_LSM_STATIC_CALL(NUM, NAME, RET, ...) \ DEFINE_STATIC_CALL_NULL(LSM_STATIC_CALL(NAME, NUM), \ *((RET(*)(__VA_ARGS__))NULL)); \ DEFINE_STATIC_KEY_FALSE(SECURITY_HOOK_ACTIVE_KEY(NAME, NUM)); #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ LSM_DEFINE_UNROLL(DEFINE_LSM_STATIC_CALL, NAME, RET, __VA_ARGS__) #include <linux/lsm_hook_defs.h> #undef LSM_HOOK #undef DEFINE_LSM_STATIC_CALL /* * Initialise a table of static calls for each LSM hook. * DEFINE_STATIC_CALL_NULL invocation above generates a key (STATIC_CALL_KEY) * and a trampoline (STATIC_CALL_TRAMP) which are used to call * __static_call_update when updating the static call. * * The static calls table is used by early LSMs, some architectures can fault on * unaligned accesses and the fault handling code may not be ready by then. * Thus, the static calls table should be aligned to avoid any unhandled faults * in early init. 
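 *
 * As a rough illustration only (the exact expansion depends on MAX_LSM_COUNT
 * and the UNROLL() helper), the initializer below generates one entry per
 * static call slot for every hook, along the lines of:
 *
 *	.inode_permission = {
 *		{ .key = &STATIC_CALL_KEY(lsm_static_call_inode_permission_0),
 *		  .trampoline = LSM_HOOK_TRAMP(inode_permission, 0),
 *		  .active = &security_hook_active_inode_permission_0 },
 *		{ .key = &STATIC_CALL_KEY(lsm_static_call_inode_permission_1),
 *		  .trampoline = LSM_HOOK_TRAMP(inode_permission, 1),
 *		  .active = &security_hook_active_inode_permission_1 },
 *		...
 *	},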
*/ struct lsm_static_calls_table static_calls_table __ro_after_init __aligned(sizeof(u64)) = { #define INIT_LSM_STATIC_CALL(NUM, NAME) \ (struct lsm_static_call) { \ .key = &STATIC_CALL_KEY(LSM_STATIC_CALL(NAME, NUM)), \ .trampoline = LSM_HOOK_TRAMP(NAME, NUM), \ .active = &SECURITY_HOOK_ACTIVE_KEY(NAME, NUM), \ }, #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ .NAME = { \ LSM_DEFINE_UNROLL(INIT_LSM_STATIC_CALL, NAME) \ }, #include <linux/lsm_hook_defs.h> #undef LSM_HOOK #undef INIT_LSM_STATIC_CALL }; /** * lsm_file_alloc - allocate a composite file blob * @file: the file that needs a blob * * Allocate the file blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_file_alloc(struct file *file) { if (!lsm_file_cache) { file->f_security = NULL; return 0; } file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL); if (file->f_security == NULL) return -ENOMEM; return 0; } /** * lsm_blob_alloc - allocate a composite blob * @dest: the destination for the blob * @size: the size of the blob * @gfp: allocation type * * Allocate a blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp) { if (size == 0) { *dest = NULL; return 0; } *dest = kzalloc(size, gfp); if (*dest == NULL) return -ENOMEM; return 0; } /** * lsm_cred_alloc - allocate a composite cred blob * @cred: the cred that needs a blob * @gfp: allocation type * * Allocate the cred blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ int lsm_cred_alloc(struct cred *cred, gfp_t gfp) { return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp); } /** * lsm_inode_alloc - allocate a composite inode blob * @inode: the inode that needs a blob * @gfp: allocation flags * * Allocate the inode blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_inode_alloc(struct inode *inode, gfp_t gfp) { if (!lsm_inode_cache) { inode->i_security = NULL; return 0; } inode->i_security = kmem_cache_zalloc(lsm_inode_cache, gfp); if (inode->i_security == NULL) return -ENOMEM; return 0; } /** * lsm_task_alloc - allocate a composite task blob * @task: the task that needs a blob * * Allocate the task blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ int lsm_task_alloc(struct task_struct *task) { return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL); } /** * lsm_ipc_alloc - allocate a composite ipc blob * @kip: the ipc that needs a blob * * Allocate the ipc blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_ipc_alloc(struct kern_ipc_perm *kip) { return lsm_blob_alloc(&kip->security, blob_sizes.lbs_ipc, GFP_KERNEL); } #ifdef CONFIG_KEYS /** * lsm_key_alloc - allocate a composite key blob * @key: the key that needs a blob * * Allocate the key blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_key_alloc(struct key *key) { return lsm_blob_alloc(&key->security, blob_sizes.lbs_key, GFP_KERNEL); } #endif /* CONFIG_KEYS */ /** * lsm_msg_msg_alloc - allocate a composite msg_msg blob * @mp: the msg_msg that needs a blob * * Allocate the ipc blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. 
*/ static int lsm_msg_msg_alloc(struct msg_msg *mp) { return lsm_blob_alloc(&mp->security, blob_sizes.lbs_msg_msg, GFP_KERNEL); } /** * lsm_bdev_alloc - allocate a composite block_device blob * @bdev: the block_device that needs a blob * * Allocate the block_device blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_bdev_alloc(struct block_device *bdev) { return lsm_blob_alloc(&bdev->bd_security, blob_sizes.lbs_bdev, GFP_KERNEL); } #ifdef CONFIG_BPF_SYSCALL /** * lsm_bpf_map_alloc - allocate a composite bpf_map blob * @map: the bpf_map that needs a blob * * Allocate the bpf_map blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_bpf_map_alloc(struct bpf_map *map) { return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL); } /** * lsm_bpf_prog_alloc - allocate a composite bpf_prog blob * @prog: the bpf_prog that needs a blob * * Allocate the bpf_prog blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_bpf_prog_alloc(struct bpf_prog *prog) { return lsm_blob_alloc(&prog->aux->security, blob_sizes.lbs_bpf_prog, GFP_KERNEL); } /** * lsm_bpf_token_alloc - allocate a composite bpf_token blob * @token: the bpf_token that needs a blob * * Allocate the bpf_token blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_bpf_token_alloc(struct bpf_token *token) { return lsm_blob_alloc(&token->security, blob_sizes.lbs_bpf_token, GFP_KERNEL); } #endif /* CONFIG_BPF_SYSCALL */ /** * lsm_superblock_alloc - allocate a composite superblock blob * @sb: the superblock that needs a blob * * Allocate the superblock blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_superblock_alloc(struct super_block *sb) { return lsm_blob_alloc(&sb->s_security, blob_sizes.lbs_superblock, GFP_KERNEL); } /** * lsm_fill_user_ctx - Fill a user space lsm_ctx structure * @uctx: a userspace LSM context to be filled * @uctx_len: available uctx size (input), used uctx size (output) * @val: the new LSM context value * @val_len: the size of the new LSM context value * @id: LSM id * @flags: LSM defined flags * * Fill all of the fields in a userspace lsm_ctx structure. If @uctx is NULL * simply calculate the required size to output via @utc_len and return * success. * * Returns 0 on success, -E2BIG if userspace buffer is not large enough, * -EFAULT on a copyout error, -ENOMEM if memory can't be allocated. */ int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, void *val, size_t val_len, u64 id, u64 flags) { struct lsm_ctx *nctx = NULL; size_t nctx_len; int rc = 0; nctx_len = ALIGN(struct_size(nctx, ctx, val_len), sizeof(void *)); if (nctx_len > *uctx_len) { rc = -E2BIG; goto out; } /* no buffer - return success/0 and set @uctx_len to the req size */ if (!uctx) goto out; nctx = kzalloc(nctx_len, GFP_KERNEL); if (nctx == NULL) { rc = -ENOMEM; goto out; } nctx->id = id; nctx->flags = flags; nctx->len = nctx_len; nctx->ctx_len = val_len; memcpy(nctx->ctx, val, val_len); if (copy_to_user(uctx, nctx, nctx_len)) rc = -EFAULT; out: kfree(nctx); *uctx_len = nctx_len; return rc; } /* * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and * can be accessed with: * * LSM_RET_DEFAULT(<hook_name>) * * The macros below define static constants for the default value of each * LSM hook. 
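 *
 * As a sketch (assuming, for illustration, that lsm_hook_defs.h declares
 * LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)), the
 * declarations below would emit:
 *
 *	static const int __maybe_unused inode_permission_default = 0;
 *
 * so that LSM_RET_DEFAULT(inode_permission) expands to
 * inode_permission_default. The call_int_hook() macro defined further below
 * compares each module's return value against this default and stops at the
 * first hook that returns anything else.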
*/ #define LSM_RET_DEFAULT(NAME) (NAME##_default) #define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME) #define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \ static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT); #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME) #include <linux/lsm_hook_defs.h> #undef LSM_HOOK /* * Hook list operation macros. * * call_void_hook: * This is a hook that does not return a value. * * call_int_hook: * This is a hook that returns a value. */ #define __CALL_STATIC_VOID(NUM, HOOK, ...) \ do { \ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \ static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \ } \ } while (0); #define call_void_hook(HOOK, ...) \ do { \ LSM_LOOP_UNROLL(__CALL_STATIC_VOID, HOOK, __VA_ARGS__); \ } while (0) #define __CALL_STATIC_INT(NUM, R, HOOK, LABEL, ...) \ do { \ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \ R = static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \ if (R != LSM_RET_DEFAULT(HOOK)) \ goto LABEL; \ } \ } while (0); #define call_int_hook(HOOK, ...) \ ({ \ __label__ OUT; \ int RC = LSM_RET_DEFAULT(HOOK); \ \ LSM_LOOP_UNROLL(__CALL_STATIC_INT, RC, HOOK, OUT, __VA_ARGS__); \ OUT: \ RC; \ }) #define lsm_for_each_hook(scall, NAME) \ for (scall = static_calls_table.NAME; \ scall - static_calls_table.NAME < MAX_LSM_COUNT; scall++) \ if (static_key_enabled(&scall->active->key)) /* Security operations */ /** * security_binder_set_context_mgr() - Check if becoming binder ctx mgr is ok * @mgr: task credentials of current binder process * * Check whether @mgr is allowed to be the binder context manager. * * Return: Return 0 if permission is granted. */ int security_binder_set_context_mgr(const struct cred *mgr) { return call_int_hook(binder_set_context_mgr, mgr); } /** * security_binder_transaction() - Check if a binder transaction is allowed * @from: sending process * @to: receiving process * * Check whether @from is allowed to invoke a binder transaction call to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transaction(const struct cred *from, const struct cred *to) { return call_int_hook(binder_transaction, from, to); } /** * security_binder_transfer_binder() - Check if a binder transfer is allowed * @from: sending process * @to: receiving process * * Check whether @from is allowed to transfer a binder reference to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transfer_binder(const struct cred *from, const struct cred *to) { return call_int_hook(binder_transfer_binder, from, to); } /** * security_binder_transfer_file() - Check if a binder file xfer is allowed * @from: sending process * @to: receiving process * @file: file being transferred * * Check whether @from is allowed to transfer @file to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transfer_file(const struct cred *from, const struct cred *to, const struct file *file) { return call_int_hook(binder_transfer_file, from, to, file); } /** * security_ptrace_access_check() - Check if tracing is allowed * @child: target process * @mode: PTRACE_MODE flags * * Check permission before allowing the current process to trace the @child * process. 
Security modules may also want to perform a process tracing check * during an execve, for example in the bprm_creds_for_exec or bprm_creds_from_file * hooks, if the process is being traced and its security attributes would be * changed by the execve. * * Return: Returns 0 if permission is granted. */ int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { return call_int_hook(ptrace_access_check, child, mode); } /** * security_ptrace_traceme() - Check if tracing is allowed * @parent: tracing process * * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself to the * @parent process for tracing. * * Return: Returns 0 if permission is granted. */ int security_ptrace_traceme(struct task_struct *parent) { return call_int_hook(ptrace_traceme, parent); } /** * security_capget() - Get the capability sets for a process * @target: target process * @effective: effective capability set * @inheritable: inheritable capability set * @permitted: permitted capability set * * Get the @effective, @inheritable, and @permitted capability sets for the * @target process. The hook may also perform permission checking to determine * if the current process is allowed to see the capability sets of the @target * process. * * Return: Returns 0 if the capability sets were successfully obtained. */ int security_capget(const struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { return call_int_hook(capget, target, effective, inheritable, permitted); } /** * security_capset() - Set the capability sets for a process * @new: new credentials for the target process * @old: current credentials of the target process * @effective: effective capability set * @inheritable: inheritable capability set * @permitted: permitted capability set * * Set the @effective, @inheritable, and @permitted capability sets for the * current process. * * Return: Returns 0 and updates @new if permission is granted. */ int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { return call_int_hook(capset, new, old, effective, inheritable, permitted); } /** * security_capable() - Check if a process has the necessary capability * @cred: credentials to examine * @ns: user namespace * @cap: capability requested * @opts: capability check options * * Check whether the process holding the @cred credentials has the @cap * capability in the @ns user namespace. @cap contains the capability * <include/linux/capability.h>. @opts contains options for the capable check * <include/linux/security.h>. * * Return: Returns 0 if the capability is granted. */ int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) { return call_int_hook(capable, cred, ns, cap, opts); } /** * security_quotactl() - Check if a quotactl() syscall is allowed for this fs * @cmds: commands * @type: type * @id: id * @sb: filesystem * * Check whether the quotactl syscall is allowed for this @sb. * * Return: Returns 0 if permission is granted. */ int security_quotactl(int cmds, int type, int id, const struct super_block *sb) { return call_int_hook(quotactl, cmds, type, id, sb); } /** * security_quota_on() - Check if QUOTAON is allowed for a dentry * @dentry: dentry * * Check whether QUOTAON is allowed for @dentry. * * Return: Returns 0 if permission is granted.
*/ int security_quota_on(struct dentry *dentry) { return call_int_hook(quota_on, dentry); } /** * security_syslog() - Check if accessing the kernel message ring is allowed * @type: SYSLOG_ACTION_* type * * Check permission before accessing the kernel message ring or changing * logging to the console. See the syslog(2) manual page for an explanation of * the @type values. * * Return: Return 0 if permission is granted. */ int security_syslog(int type) { return call_int_hook(syslog, type); } /** * security_settime64() - Check if changing the system time is allowed * @ts: new time * @tz: timezone * * Check permission to change the system time, struct timespec64 is defined in * <include/linux/time64.h> and timezone is defined in <include/linux/time.h>. * * Return: Returns 0 if permission is granted. */ int security_settime64(const struct timespec64 *ts, const struct timezone *tz) { return call_int_hook(settime, ts, tz); } /** * security_vm_enough_memory_mm() - Check if allocating a new mem map is allowed * @mm: mm struct * @pages: number of pages * * Check permissions for allocating a new virtual mapping. If all LSMs return * a positive value, __vm_enough_memory() will be called with cap_sys_admin * set. If at least one LSM returns 0 or negative, __vm_enough_memory() will be * called with cap_sys_admin cleared. * * Return: Returns 0 if permission is granted by the LSM infrastructure to the * caller. */ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { struct lsm_static_call *scall; int cap_sys_admin = 1; int rc; /* * The module will respond with a positive value if it thinks the * __vm_enough_memory() call should be made with cap_sys_admin set. If all of * the modules agree that it should be set it will. If any module thinks it * should not be set it won't. */ lsm_for_each_hook(scall, vm_enough_memory) { rc = scall->hl->hook.vm_enough_memory(mm, pages); if (rc <= 0) { cap_sys_admin = 0; break; } } return __vm_enough_memory(mm, pages, cap_sys_admin); } /** * security_bprm_creds_for_exec() - Prepare the credentials for exec() * @bprm: binary program information * * If the setup in prepare_exec_creds did not setup @bprm->cred->security * properly for executing @bprm->file, update the LSM's portion of * @bprm->cred->security to be what commit_creds needs to install for the new * program. This hook may also optionally check permissions (e.g. for * transitions between security domains). The hook must set @bprm->secureexec * to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm * contains the linux_binprm structure. * * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is * set. The result must be the same as without this flag even if the execution * will never really happen and @bprm will always be dropped. * * This hook must not change current->cred, only @bprm->cred. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_for_exec(struct linux_binprm *bprm) { return call_int_hook(bprm_creds_for_exec, bprm); } /** * security_bprm_creds_from_file() - Update linux_binprm creds based on file * @bprm: binary program information * @file: associated file * * If @file is setpcap, suid, sgid or otherwise marked to change privilege upon * exec, update @bprm->cred to reflect that change. This is called after * finding the binary that will be executed without an interpreter.
This * ensures that the credentials will not be derived from a script that the * binary will need to reopen, which when reopened may end up being a completely * different file. This hook may also optionally check permissions (e.g. for * transitions between security domains). The hook must set @bprm->secureexec * to 1 if AT_SECURE should be set to request libc enable secure mode. The * hook must add to @bprm->per_clear any personality flags that should be * cleared from current->personality. @bprm contains the linux_binprm * structure. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) { return call_int_hook(bprm_creds_from_file, bprm, file); } /** * security_bprm_check() - Mediate binary handler search * @bprm: binary program information * * This hook mediates the point when a search for a binary handler will begin. * It allows a check against the @bprm->cred->security value which was set in * the preceding creds_for_exec call. The argv list and envp list are reliably * available in @bprm. This hook may be called multiple times during a single * execve. @bprm contains the linux_binprm structure. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_check(struct linux_binprm *bprm) { return call_int_hook(bprm_check_security, bprm); } /** * security_bprm_committing_creds() - Install creds for a process during exec() * @bprm: binary program information * * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials pointed to * by @current->cred and the information set in @bprm->cred by the * bprm_creds_for_exec hook. @bprm points to the linux_binprm structure. This * hook is a good place to perform state changes on the process such as closing * open file descriptors to which access will no longer be granted when the * attributes are changed. This is called immediately before commit_creds(). */ void security_bprm_committing_creds(const struct linux_binprm *bprm) { call_void_hook(bprm_committing_creds, bprm); } /** * security_bprm_committed_creds() - Tidy up after cred install during exec() * @bprm: binary program information * * Tidy up after the installation of the new security attributes of a process * being transformed by an execve operation. The new credentials have, by this * point, been set to @current->cred. @bprm points to the linux_binprm * structure. This hook is a good place to perform state changes on the * process such as clearing out non-inheritable signal state. This is called * immediately after commit_creds(). */ void security_bprm_committed_creds(const struct linux_binprm *bprm) { call_void_hook(bprm_committed_creds, bprm); } /** * security_fs_context_submount() - Initialise fc->security * @fc: new filesystem context * @reference: dentry reference for submount/remount * * Fill out the ->security field for a new fs_context. * * Return: Returns 0 on success or negative error code on failure. */ int security_fs_context_submount(struct fs_context *fc, struct super_block *reference) { return call_int_hook(fs_context_submount, fc, reference); } /** * security_fs_context_dup() - Duplicate a fs_context LSM blob * @fc: destination filesystem context * @src_fc: source filesystem context * * Allocate and attach a security structure to fc->security. This pointer is * initialised to NULL by the caller. @fc indicates the new filesystem context.
* @src_fc indicates the original filesystem context. * * Return: Returns 0 on success or a negative error code on failure. */ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { return call_int_hook(fs_context_dup, fc, src_fc); } /** * security_fs_context_parse_param() - Configure a filesystem context * @fc: filesystem context * @param: filesystem parameter * * Userspace provided a parameter to configure a superblock. The LSM can * consume the parameter or return it to the caller for use elsewhere. * * Return: If the parameter is used by the LSM it should return 0, if it is * returned to the caller -ENOPARAM is returned, otherwise a negative * error code is returned. */ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct lsm_static_call *scall; int trc; int rc = -ENOPARAM; lsm_for_each_hook(scall, fs_context_parse_param) { trc = scall->hl->hook.fs_context_parse_param(fc, param); if (trc == 0) rc = 0; else if (trc != -ENOPARAM) return trc; } return rc; } /** * security_sb_alloc() - Allocate a super_block LSM blob * @sb: filesystem superblock * * Allocate and attach a security structure to the sb->s_security field. The * s_security field is initialized to NULL when the structure is allocated. * @sb contains the super_block structure to be modified. * * Return: Returns 0 if operation was successful. */ int security_sb_alloc(struct super_block *sb) { int rc = lsm_superblock_alloc(sb); if (unlikely(rc)) return rc; rc = call_int_hook(sb_alloc_security, sb); if (unlikely(rc)) security_sb_free(sb); return rc; } /** * security_sb_delete() - Release super_block LSM associated objects * @sb: filesystem superblock * * Release objects tied to a superblock (e.g. inodes). @sb contains the * super_block structure being released. */ void security_sb_delete(struct super_block *sb) { call_void_hook(sb_delete, sb); } /** * security_sb_free() - Free a super_block LSM blob * @sb: filesystem superblock * * Deallocate and clear the sb->s_security field. @sb contains the super_block * structure to be modified. */ void security_sb_free(struct super_block *sb) { call_void_hook(sb_free_security, sb); kfree(sb->s_security); sb->s_security = NULL; } /** * security_free_mnt_opts() - Free memory associated with mount options * @mnt_opts: LSM processed mount options * * Free memory associated with @mnt_opts. */ void security_free_mnt_opts(void **mnt_opts) { if (!*mnt_opts) return; call_void_hook(sb_free_mnt_opts, *mnt_opts); *mnt_opts = NULL; } EXPORT_SYMBOL(security_free_mnt_opts); /** * security_sb_eat_lsm_opts() - Consume LSM mount options * @options: mount options * @mnt_opts: LSM processed mount options * * Eat (scan @options) and save them in @mnt_opts. * * Return: Returns 0 on success, negative values on failure. */ int security_sb_eat_lsm_opts(char *options, void **mnt_opts) { return call_int_hook(sb_eat_lsm_opts, options, mnt_opts); } EXPORT_SYMBOL(security_sb_eat_lsm_opts); /** * security_sb_mnt_opts_compat() - Check if new mount options are allowed * @sb: filesystem superblock * @mnt_opts: new mount options * * Determine if the new mount options in @mnt_opts are allowed given the * existing mounted filesystem at @sb. * * Return: Returns 0 if options are compatible.
*/ int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts) { return call_int_hook(sb_mnt_opts_compat, sb, mnt_opts); } EXPORT_SYMBOL(security_sb_mnt_opts_compat); /** * security_sb_remount() - Verify no incompatible mount changes during remount * @sb: filesystem superblock * @mnt_opts: (re)mount options * * Extracts security system specific mount options and verifies no changes are * being made to those options. * * Return: Returns 0 if permission is granted. */ int security_sb_remount(struct super_block *sb, void *mnt_opts) { return call_int_hook(sb_remount, sb, mnt_opts); } EXPORT_SYMBOL(security_sb_remount); /** * security_sb_kern_mount() - Check if a kernel mount is allowed * @sb: filesystem superblock * * Mount this @sb if allowed by permissions. * * Return: Returns 0 if permission is granted. */ int security_sb_kern_mount(const struct super_block *sb) { return call_int_hook(sb_kern_mount, sb); } /** * security_sb_show_options() - Output the mount options for a superblock * @m: output file * @sb: filesystem superblock * * Show (print on @m) mount options for this @sb. * * Return: Returns 0 on success, negative values on failure. */ int security_sb_show_options(struct seq_file *m, struct super_block *sb) { return call_int_hook(sb_show_options, m, sb); } /** * security_sb_statfs() - Check if accessing fs stats is allowed * @dentry: superblock handle * * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. @dentry is a handle on the superblock for the filesystem. * * Return: Returns 0 if permission is granted. */ int security_sb_statfs(struct dentry *dentry) { return call_int_hook(sb_statfs, dentry); } /** * security_sb_mount() - Check permission for mounting a filesystem * @dev_name: filesystem backing device * @path: mount point * @type: filesystem type * @flags: mount flags * @data: filesystem specific data * * Check permission before an object specified by @dev_name is mounted on the * mount point named by @nd. For an ordinary mount, @dev_name identifies a * device if the file system type requires a device. For a remount * (@flags & MS_REMOUNT), @dev_name is irrelevant. For a loopback/bind mount * (@flags & MS_BIND), @dev_name identifies the pathname of the object being * mounted. * * Return: Returns 0 if permission is granted. */ int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { return call_int_hook(sb_mount, dev_name, path, type, flags, data); } /** * security_sb_umount() - Check permission for unmounting a filesystem * @mnt: mounted filesystem * @flags: unmount flags * * Check permission before the @mnt file system is unmounted. * * Return: Returns 0 if permission is granted. */ int security_sb_umount(struct vfsmount *mnt, int flags) { return call_int_hook(sb_umount, mnt, flags); } /** * security_sb_pivotroot() - Check permissions for pivoting the rootfs * @old_path: new location for current rootfs * @new_path: location of the new rootfs * * Check permission before pivoting the root filesystem. * * Return: Returns 0 if permission is granted. */ int security_sb_pivotroot(const struct path *old_path, const struct path *new_path) { return call_int_hook(sb_pivotroot, old_path, new_path); } /** * security_sb_set_mnt_opts() - Set the mount options for a filesystem * @sb: filesystem superblock * @mnt_opts: binary mount options * @kern_flags: kernel flags (in) * @set_kern_flags: kernel flags (out) * * Set the security relevant mount options used for a superblock. 
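 * If @mnt_opts is non-NULL but none of the active LSMs consumes the options,
 * the call fails with -EOPNOTSUPP (see the initialisation of rc in the
 * function body below); with a NULL @mnt_opts the hook's default return
 * value is used instead.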
* * Return: Returns 0 on success, error on failure. */ int security_sb_set_mnt_opts(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { struct lsm_static_call *scall; int rc = mnt_opts ? -EOPNOTSUPP : LSM_RET_DEFAULT(sb_set_mnt_opts); lsm_for_each_hook(scall, sb_set_mnt_opts) { rc = scall->hl->hook.sb_set_mnt_opts(sb, mnt_opts, kern_flags, set_kern_flags); if (rc != LSM_RET_DEFAULT(sb_set_mnt_opts)) break; } return rc; } EXPORT_SYMBOL(security_sb_set_mnt_opts); /** * security_sb_clone_mnt_opts() - Duplicate superblock mount options * @oldsb: source superblock * @newsb: destination superblock * @kern_flags: kernel flags (in) * @set_kern_flags: kernel flags (out) * * Copy all security options from a given superblock to another. * * Return: Returns 0 on success, error on failure. */ int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags) { return call_int_hook(sb_clone_mnt_opts, oldsb, newsb, kern_flags, set_kern_flags); } EXPORT_SYMBOL(security_sb_clone_mnt_opts); /** * security_move_mount() - Check permissions for moving a mount * @from_path: source mount point * @to_path: destination mount point * * Check permission before a mount is moved. * * Return: Returns 0 if permission is granted. */ int security_move_mount(const struct path *from_path, const struct path *to_path) { return call_int_hook(move_mount, from_path, to_path); } /** * security_path_notify() - Check if setting a watch is allowed * @path: file path * @mask: event mask * @obj_type: file path type * * Check permissions before setting a watch on events as defined by @mask, on * an object at @path, whose type is defined by @obj_type. * * Return: Returns 0 if permission is granted. */ int security_path_notify(const struct path *path, u64 mask, unsigned int obj_type) { return call_int_hook(path_notify, path, mask, obj_type); } /** * security_inode_alloc() - Allocate an inode LSM blob * @inode: the inode * @gfp: allocation flags * * Allocate and attach a security structure to @inode->i_security. The * i_security field is initialized to NULL when the inode structure is * allocated. * * Return: Return 0 if operation was successful. */ int security_inode_alloc(struct inode *inode, gfp_t gfp) { int rc = lsm_inode_alloc(inode, gfp); if (unlikely(rc)) return rc; rc = call_int_hook(inode_alloc_security, inode); if (unlikely(rc)) security_inode_free(inode); return rc; } static void inode_free_by_rcu(struct rcu_head *head) { /* The rcu head is at the start of the inode blob */ call_void_hook(inode_free_security_rcu, head); kmem_cache_free(lsm_inode_cache, head); } /** * security_inode_free() - Free an inode's LSM blob * @inode: the inode * * Release any LSM resources associated with @inode, although due to the * inode's RCU protections it is possible that the resources will not be * fully released until after the current RCU grace period has elapsed. * * It is important for LSMs to note that despite being present in a call to * security_inode_free(), @inode may still be referenced in a VFS path walk * and calls to security_inode_permission() may be made during, or after, * a call to security_inode_free(). For this reason the inode->i_security * field is released via a call_rcu() callback and any LSMs which need to * retain inode state for use in security_inode_permission() should only * release that state in the inode_free_security_rcu() LSM hook callback. 
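 *
 * Concretely, the ordering visible in the code below is: the
 * inode_free_security hook runs synchronously from this function, while the
 * blob itself (whose first member is the rcu_head handed to call_rcu()) is
 * only passed to the inode_free_security_rcu hooks and returned to
 * lsm_inode_cache from inode_free_by_rcu() after the grace period has ended.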
*/ void security_inode_free(struct inode *inode) { call_void_hook(inode_free_security, inode); if (!inode->i_security) return; call_rcu((struct rcu_head *)inode->i_security, inode_free_by_rcu); } /** * security_dentry_init_security() - Perform dentry initialization * @dentry: the dentry to initialize * @mode: mode used to determine resource type * @name: name of the last path component * @xattr_name: name of the security/LSM xattr * @lsmctx: pointer to the resulting LSM context * * Compute a context for a dentry as the inode is not yet available since NFSv4 * has no label backed by an EA anyway. It is important to note that * @xattr_name does not need to be free'd by the caller, it is a static string. * * Return: Returns 0 on success, negative values on failure. */ int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, struct lsm_context *lsmctx) { return call_int_hook(dentry_init_security, dentry, mode, name, xattr_name, lsmctx); } EXPORT_SYMBOL(security_dentry_init_security); /** * security_dentry_create_files_as() - Perform dentry initialization * @dentry: the dentry to initialize * @mode: mode used to determine resource type * @name: name of the last path component * @old: creds to use for LSM context calculations * @new: creds to modify * * Compute a context for a dentry as the inode is not yet available and set * that context in passed in creds so that new files are created using that * context. Context is calculated using the passed in creds and not the creds * of the caller. * * Return: Returns 0 on success, error on failure. */ int security_dentry_create_files_as(struct dentry *dentry, int mode, const struct qstr *name, const struct cred *old, struct cred *new) { return call_int_hook(dentry_create_files_as, dentry, mode, name, old, new); } EXPORT_SYMBOL(security_dentry_create_files_as); /** * security_inode_init_security() - Initialize an inode's LSM context * @inode: the inode * @dir: parent directory * @qstr: last component of the pathname * @initxattrs: callback function to write xattrs * @fs_data: filesystem specific data * * Obtain the security attribute name suffix and value to set on a newly * created inode and set up the incore security field for the new inode. This * hook is called by the fs code as part of the inode creation transaction and * provides for atomic labeling of the inode, unlike the post_create/mkdir/... * hooks called by the VFS. * * The hook function is expected to populate the xattrs array, by calling * lsm_get_xattr_slot() to retrieve the slots reserved by the security module * with the lbs_xattr_count field of the lsm_blob_sizes structure. For each * slot, the hook function should set ->name to the attribute name suffix * (e.g. selinux), to allocate ->value (will be freed by the caller) and set it * to the attribute value, to set ->value_len to the length of the value. If * the security module does not use security attributes or does not wish to put * a security attribute on this particular inode, then it should return * -EOPNOTSUPP to skip this processing. * * Return: Returns 0 if the LSM successfully initialized all of the inode * security attributes that are required, negative values otherwise. 
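 *
 * As a hedged illustration only (the helper's exact signature and the names
 * ctx, ctx_len and "example_lsm" are assumptions made for this sketch, not
 * taken from this file), a module's inode_init_security hook might fill its
 * reserved slot roughly like this:
 *
 *	struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
 *
 *	if (!xattr)
 *		return 0;
 *	xattr->name = "example_lsm";
 *	xattr->value = kmemdup(ctx, ctx_len, GFP_NOFS);
 *	if (!xattr->value)
 *		return -ENOMEM;
 *	xattr->value_len = ctx_len;
 *	return 0;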
*/ int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const initxattrs initxattrs, void *fs_data) { struct lsm_static_call *scall; struct xattr *new_xattrs = NULL; int ret = -EOPNOTSUPP, xattr_count = 0; if (unlikely(IS_PRIVATE(inode))) return 0; if (!blob_sizes.lbs_xattr_count) return 0; if (initxattrs) { /* Allocate +1 as terminator. */ new_xattrs = kcalloc(blob_sizes.lbs_xattr_count + 1, sizeof(*new_xattrs), GFP_NOFS); if (!new_xattrs) return -ENOMEM; } lsm_for_each_hook(scall, inode_init_security) { ret = scall->hl->hook.inode_init_security(inode, dir, qstr, new_xattrs, &xattr_count); if (ret && ret != -EOPNOTSUPP) goto out; /* * As documented in lsm_hooks.h, -EOPNOTSUPP in this context * means that the LSM is not willing to provide an xattr, not * that it wants to signal an error. Thus, continue to invoke * the remaining LSMs. */ } /* If initxattrs() is NULL, xattr_count is zero, skip the call. */ if (!xattr_count) goto out; ret = initxattrs(inode, new_xattrs, fs_data); out: for (; xattr_count > 0; xattr_count--) kfree(new_xattrs[xattr_count - 1].value); kfree(new_xattrs); return (ret == -EOPNOTSUPP) ? 0 : ret; } EXPORT_SYMBOL(security_inode_init_security); /** * security_inode_init_security_anon() - Initialize an anonymous inode * @inode: the inode * @name: the anonymous inode class * @context_inode: an optional related inode * * Set up the incore security field for the new anonymous inode and return * whether the inode creation is permitted by the security module or not. * * Return: Returns 0 on success, -EACCES if the security module denies the * creation of this inode, or another -errno upon other errors. */ int security_inode_init_security_anon(struct inode *inode, const struct qstr *name, const struct inode *context_inode) { return call_int_hook(inode_init_security_anon, inode, name, context_inode); } #ifdef CONFIG_SECURITY_PATH /** * security_path_mknod() - Check if creating a special file is allowed * @dir: parent directory * @dentry: new file * @mode: new file mode * @dev: device number * * Check permissions when creating a file. Note that this hook is called even * if mknod operation is being done for a regular file. * * Return: Returns 0 if permission is granted. */ int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_mknod, dir, dentry, mode, dev); } EXPORT_SYMBOL(security_path_mknod); /** * security_path_post_mknod() - Update inode security after reg file creation * @idmap: idmap of the mount * @dentry: new file * * Update inode security field after a regular file has been created. */ void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(path_post_mknod, idmap, dentry); } /** * security_path_mkdir() - Check if creating a new directory is allowed * @dir: parent directory * @dentry: new directory * @mode: new directory mode * * Check permissions to create a new directory in the existing directory. * * Return: Returns 0 if permission is granted. 
*/ int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_mkdir, dir, dentry, mode); } EXPORT_SYMBOL(security_path_mkdir); /** * security_path_rmdir() - Check if removing a directory is allowed * @dir: parent directory * @dentry: directory to remove * * Check the permission to remove a directory. * * Return: Returns 0 if permission is granted. */ int security_path_rmdir(const struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_rmdir, dir, dentry); } /** * security_path_unlink() - Check if removing a hard link is allowed * @dir: parent directory * @dentry: file * * Check the permission to remove a hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_unlink(const struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_unlink, dir, dentry); } EXPORT_SYMBOL(security_path_unlink); /** * security_path_symlink() - Check if creating a symbolic link is allowed * @dir: parent directory * @dentry: symbolic link * @old_name: file pathname * * Check the permission to create a symbolic link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_symlink, dir, dentry, old_name); } /** * security_path_link - Check if creating a hard link is allowed * @old_dentry: existing file * @new_dir: new parent directory * @new_dentry: new link * * Check permission before creating a new hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)))) return 0; return call_int_hook(path_link, old_dentry, new_dir, new_dentry); } /** * security_path_rename() - Check if renaming a file is allowed * @old_dir: parent directory of the old file * @old_dentry: the old file * @new_dir: parent directory of the new file * @new_dentry: the new file * @flags: flags * * Check for permission to rename a file or directory. * * Return: Returns 0 if permission is granted. */ int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) || (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry))))) return 0; return call_int_hook(path_rename, old_dir, old_dentry, new_dir, new_dentry, flags); } EXPORT_SYMBOL(security_path_rename); /** * security_path_truncate() - Check if truncating a file is allowed * @path: file * * Check permission before truncating the file indicated by path. Note that * truncation permissions may also be checked based on already opened files, * using the security_file_truncate() hook. * * Return: Returns 0 if permission is granted. */ int security_path_truncate(const struct path *path) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_truncate, path); } /** * security_path_chmod() - Check if changing the file's mode is allowed * @path: file * @mode: new mode * * Check for permission to change a mode of the file @path. 
The new mode is * specified in @mode which is a bitmask of constants from * <include/uapi/linux/stat.h>. * * Return: Returns 0 if permission is granted. */ int security_path_chmod(const struct path *path, umode_t mode) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_chmod, path, mode); } /** * security_path_chown() - Check if changing the file's owner/group is allowed * @path: file * @uid: file owner * @gid: file group * * Check for permission to change owner/group of a file or directory. * * Return: Returns 0 if permission is granted. */ int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_chown, path, uid, gid); } /** * security_path_chroot() - Check if changing the root directory is allowed * @path: directory * * Check for permission to change root directory. * * Return: Returns 0 if permission is granted. */ int security_path_chroot(const struct path *path) { return call_int_hook(path_chroot, path); } #endif /* CONFIG_SECURITY_PATH */ /** * security_inode_create() - Check if creating a file is allowed * @dir: the parent directory * @dentry: the file being created * @mode: requested file mode * * Check permission to create a regular file. * * Return: Returns 0 if permission is granted. */ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_create, dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_create); /** * security_inode_post_create_tmpfile() - Update inode security of new tmpfile * @idmap: idmap of the mount * @inode: inode of the new tmpfile * * Update inode security data after a tmpfile has been created. */ void security_inode_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode) { if (unlikely(IS_PRIVATE(inode))) return; call_void_hook(inode_post_create_tmpfile, idmap, inode); } /** * security_inode_link() - Check if creating a hard link is allowed * @old_dentry: existing file * @dir: new parent directory * @new_dentry: new link * * Check permission before creating a new hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)))) return 0; return call_int_hook(inode_link, old_dentry, dir, new_dentry); } /** * security_inode_unlink() - Check if removing a hard link is allowed * @dir: parent directory * @dentry: file * * Check the permission to remove a hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_inode_unlink(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_unlink, dir, dentry); } /** * security_inode_symlink() - Check if creating a symbolic link is allowed * @dir: parent directory * @dentry: symbolic link * @old_name: existing filename * * Check the permission to create a symbolic link to a file. * * Return: Returns 0 if permission is granted. 
*/ int security_inode_symlink(struct inode *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_symlink, dir, dentry, old_name); } /** * security_inode_mkdir() - Check if creating a new directory is allowed * @dir: parent directory * @dentry: new directory * @mode: new directory mode * * Check permissions to create a new directory in the existing directory * associated with inode structure @dir. * * Return: Returns 0 if permission is granted. */ int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_mkdir, dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_mkdir); /** * security_inode_rmdir() - Check if removing a directory is allowed * @dir: parent directory * @dentry: directory to be removed * * Check the permission to remove a directory. * * Return: Returns 0 if permission is granted. */ int security_inode_rmdir(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_rmdir, dir, dentry); } /** * security_inode_mknod() - Check if creating a special file is allowed * @dir: parent directory * @dentry: new file * @mode: new file mode * @dev: device number * * Check permissions when creating a special file (or a socket or a fifo file * created via the mknod system call). Note that if mknod operation is being * done for a regular file, then the create hook will be called and not this * hook. * * Return: Returns 0 if permission is granted. */ int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_mknod, dir, dentry, mode, dev); } /** * security_inode_rename() - Check if renaming a file is allowed * @old_dir: parent directory of the old file * @old_dentry: the old file * @new_dir: parent directory of the new file * @new_dentry: the new file * @flags: flags * * Check for permission to rename a file or directory. * * Return: Returns 0 if permission is granted. */ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) || (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry))))) return 0; if (flags & RENAME_EXCHANGE) { int err = call_int_hook(inode_rename, new_dir, new_dentry, old_dir, old_dentry); if (err) return err; } return call_int_hook(inode_rename, old_dir, old_dentry, new_dir, new_dentry); } /** * security_inode_readlink() - Check if reading a symbolic link is allowed * @dentry: link * * Check the permission to read the symbolic link. * * Return: Returns 0 if permission is granted. */ int security_inode_readlink(struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_readlink, dentry); } /** * security_inode_follow_link() - Check if following a symbolic link is allowed * @dentry: link dentry * @inode: link inode * @rcu: true if in RCU-walk mode * * Check permission to follow a symbolic link when looking up a pathname. If * @rcu is true, @inode is not stable. * * Return: Returns 0 if permission is granted. 
*/ int security_inode_follow_link(struct dentry *dentry, struct inode *inode, bool rcu) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_follow_link, dentry, inode, rcu); } /** * security_inode_permission() - Check if accessing an inode is allowed * @inode: inode * @mask: access mask * * Check permission before accessing an inode. This hook is called by the * existing Linux permission function, so a security module can use it to * provide additional checking for existing Linux permission checks. Notice * that this hook is called when a file is opened (as well as many other * operations), whereas the file_security_ops permission hook is called when * the actual read/write operations are performed. * * Return: Returns 0 if permission is granted. */ int security_inode_permission(struct inode *inode, int mask) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_permission, inode, mask); } /** * security_inode_setattr() - Check if setting file attributes is allowed * @idmap: idmap of the mount * @dentry: file * @attr: new attributes * * Check permission before setting file attributes. Note that the kernel call * to notify_change is performed from several locations, whenever file * attributes change (such as when a file is truncated, chown/chmod operations, * transferring disk quotas, etc). * * Return: Returns 0 if permission is granted. */ int security_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_setattr, idmap, dentry, attr); } EXPORT_SYMBOL_GPL(security_inode_setattr); /** * security_inode_post_setattr() - Update the inode after a setattr operation * @idmap: idmap of the mount * @dentry: file * @ia_valid: file attributes set * * Update inode security field after successful setting file attributes. */ void security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_setattr, idmap, dentry, ia_valid); } /** * security_inode_getattr() - Check if getting file attributes is allowed * @path: file * * Check permission before obtaining file attributes. * * Return: Returns 0 if permission is granted. */ int security_inode_getattr(const struct path *path) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(inode_getattr, path); } /** * security_inode_setxattr() - Check if setting file xattrs is allowed * @idmap: idmap of the mount * @dentry: file * @name: xattr name * @value: xattr value * @size: size of xattr value * @flags: flags * * This hook performs the desired permission checks before setting the extended * attributes (xattrs) on @dentry. It is important to note that we have some * additional logic before the main LSM implementation calls to detect if we * need to perform an additional capability check at the LSM layer. * * Normally we enforce a capability check prior to executing the various LSM * hook implementations, but if a LSM wants to avoid this capability check, * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for * xattrs that it wants to avoid the capability check, leaving the LSM fully * responsible for enforcing the access control for the specific xattr. If all * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, * or return a 0 (the default return value), the capability check is still * performed. 
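 *
 * An LSM that fully mediates its own xattr namespace can opt out of the
 * generic capability check along these lines (illustrative sketch; the
 * "security.example" name and example_xattr_skipcap() are hypothetical):
 *
 *	static int example_xattr_skipcap(const char *name)
 *	{
 *		/* nonzero means: skip cap_inode_setxattr()/_removexattr() */
 *		return !strcmp(name, "security.example") ? 1 : 0;
 *	}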
* * Return: Returns 0 if permission is granted. */ int security_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { int rc; if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; /* enforce the capability checks at the lsm layer, if needed */ if (!call_int_hook(inode_xattr_skipcap, name)) { rc = cap_inode_setxattr(dentry, name, value, size, flags); if (rc) return rc; } return call_int_hook(inode_setxattr, idmap, dentry, name, value, size, flags); } /** * security_inode_set_acl() - Check if setting posix acls is allowed * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * @kacl: acl struct * * Check permission before setting posix acls; the posix acls in @kacl are * identified by @acl_name. * * Return: Returns 0 if permission is granted. */ int security_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_set_acl, idmap, dentry, acl_name, kacl); } /** * security_inode_post_set_acl() - Update inode security from posix acls set * @dentry: file * @acl_name: acl name * @kacl: acl struct * * Update inode security data after successfully setting posix acls on @dentry. * The posix acls in @kacl are identified by @acl_name. */ void security_inode_post_set_acl(struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_set_acl, dentry, acl_name, kacl); } /** * security_inode_get_acl() - Check if reading posix acls is allowed * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * * Check permission before getting posix acls; the posix acls are identified by * @acl_name. * * Return: Returns 0 if permission is granted. */ int security_inode_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_get_acl, idmap, dentry, acl_name); } /** * security_inode_remove_acl() - Check if removing a posix acl is allowed * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * * Check permission before removing posix acls; the posix acls are identified * by @acl_name. * * Return: Returns 0 if permission is granted. */ int security_inode_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_remove_acl, idmap, dentry, acl_name); } /** * security_inode_post_remove_acl() - Update inode security after rm posix acls * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * * Update inode security data after successfully removing posix acls on * @dentry in @idmap. The posix acls are identified by @acl_name. */ void security_inode_post_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_remove_acl, idmap, dentry, acl_name); } /** * security_inode_post_setxattr() - Update the inode after a setxattr operation * @dentry: file * @name: xattr name * @value: xattr value * @size: xattr value size * @flags: flags * * Update inode security field after successful setxattr operation.
*/ void security_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_setxattr, dentry, name, value, size, flags); } /** * security_inode_getxattr() - Check if xattr access is allowed * @dentry: file * @name: xattr name * * Check permission before obtaining the extended attributes identified by * @name for @dentry. * * Return: Returns 0 if permission is granted. */ int security_inode_getxattr(struct dentry *dentry, const char *name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_getxattr, dentry, name); } /** * security_inode_listxattr() - Check if listing xattrs is allowed * @dentry: file * * Check permission before obtaining the list of extended attribute names for * @dentry. * * Return: Returns 0 if permission is granted. */ int security_inode_listxattr(struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_listxattr, dentry); } /** * security_inode_removexattr() - Check if removing an xattr is allowed * @idmap: idmap of the mount * @dentry: file * @name: xattr name * * This hook performs the desired permission checks before removing the extended * attributes (xattrs) from @dentry. It is important to note that we have some * additional logic before the main LSM implementation calls to detect if we * need to perform an additional capability check at the LSM layer. * * Normally we enforce a capability check prior to executing the various LSM * hook implementations, but if a LSM wants to avoid this capability check, * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for * xattrs that it wants to avoid the capability check, leaving the LSM fully * responsible for enforcing the access control for the specific xattr. If all * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, * or return a 0 (the default return value), the capability check is still * performed. * * Return: Returns 0 if permission is granted. */ int security_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name) { int rc; if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; /* enforce the capability checks at the lsm layer, if needed */ if (!call_int_hook(inode_xattr_skipcap, name)) { rc = cap_inode_removexattr(idmap, dentry, name); if (rc) return rc; } return call_int_hook(inode_removexattr, idmap, dentry, name); } /** * security_inode_post_removexattr() - Update the inode after a removexattr op * @dentry: file * @name: xattr name * * Update the inode after a successful removexattr operation. */ void security_inode_post_removexattr(struct dentry *dentry, const char *name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_removexattr, dentry, name); } /** * security_inode_file_setattr() - check if setting fsxattr is allowed * @dentry: file to set filesystem extended attributes on * @fa: extended attributes to set on the inode * * Called when file_setattr() syscall or FS_IOC_FSSETXATTR ioctl() is called on * inode * * Return: Returns 0 if permission is granted.
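 *
 * For orientation, these fsxattr hooks sit underneath userspace sequences
 * such as the following (illustrative sketch, error handling omitted):
 *
 *	struct fsxattr fsx;
 *
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fsx);	/* -> inode_file_getattr */
 *	fsx.fsx_xflags |= FS_XFLAG_APPEND;
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fsx);	/* -> inode_file_setattr */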
*/ int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa) { return call_int_hook(inode_file_setattr, dentry, fa); } /** * security_inode_file_getattr() - check if retrieving fsxattr is allowed * @dentry: file to retrieve filesystem extended attributes from * @fa: extended attributes to get * * Called when file_getattr() syscall or FS_IOC_FSGETXATTR ioctl() is called on * inode * * Return: Returns 0 if permission is granted. */ int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa) { return call_int_hook(inode_file_getattr, dentry, fa); } /** * security_inode_need_killpriv() - Check if security_inode_killpriv() required * @dentry: associated dentry * * Called when an inode has been changed to determine if * security_inode_killpriv() should be called. * * Return: Return <0 on error to abort the inode change operation, return 0 if * security_inode_killpriv() does not need to be called, return >0 if * security_inode_killpriv() does need to be called. */ int security_inode_need_killpriv(struct dentry *dentry) { return call_int_hook(inode_need_killpriv, dentry); } /** * security_inode_killpriv() - The setuid bit is removed, update LSM state * @idmap: idmap of the mount * @dentry: associated dentry * * The @dentry's setuid bit is being removed. Remove similar security labels. * Called with the dentry->d_inode->i_mutex held. * * Return: Return 0 on success. If error is returned, then the operation * causing setuid bit removal is failed. */ int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry) { return call_int_hook(inode_killpriv, idmap, dentry); } /** * security_inode_getsecurity() - Get the xattr security label of an inode * @idmap: idmap of the mount * @inode: inode * @name: xattr name * @buffer: security label buffer * @alloc: allocation flag * * Retrieve a copy of the extended attribute representation of the security * label associated with @name for @inode via @buffer. Note that @name is the * remainder of the attribute name after the security prefix has been removed. * @alloc is used to specify if the call should return a value via the buffer * or just the value length. * * Return: Returns size of buffer on success. */ int security_inode_getsecurity(struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc) { if (unlikely(IS_PRIVATE(inode))) return LSM_RET_DEFAULT(inode_getsecurity); return call_int_hook(inode_getsecurity, idmap, inode, name, buffer, alloc); } /** * security_inode_setsecurity() - Set the xattr security label of an inode * @inode: inode * @name: xattr name * @value: security label * @size: length of security label * @flags: flags * * Set the security label associated with @name for @inode from the extended * attribute value @value. @size indicates the size of the @value in bytes. * @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. Note that @name is the * remainder of the attribute name after the security. prefix has been removed. * * Return: Returns 0 on success. */ int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(inode))) return LSM_RET_DEFAULT(inode_setsecurity); return call_int_hook(inode_setsecurity, inode, name, value, size, flags); } /** * security_inode_listsecurity() - List the xattr security label names * @inode: inode * @buffer: buffer * @buffer_size: size of buffer * * Copy the extended attribute names for the security labels associated with * @inode into @buffer. 
The maximum size of @buffer is specified by * @buffer_size. @buffer may be NULL to request the size of the buffer * required. * * Return: Returns number of bytes used/required on success. */ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_listsecurity, inode, buffer, buffer_size); } EXPORT_SYMBOL(security_inode_listsecurity); /** * security_inode_getlsmprop() - Get an inode's LSM data * @inode: inode * @prop: lsm specific information to return * * Get the lsm specific information associated with the node. */ void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop) { call_void_hook(inode_getlsmprop, inode, prop); } /** * security_inode_copy_up() - Create new creds for an overlayfs copy-up op * @src: union dentry of copy-up file * @new: newly created creds * * A file is about to be copied up from lower layer to upper layer of overlay * filesystem. Security module can prepare a set of new creds and modify as * need be and return new creds. Caller will switch to new creds temporarily to * create new file and release newly allocated creds. * * Return: Returns 0 on success or a negative error code on error. */ int security_inode_copy_up(struct dentry *src, struct cred **new) { return call_int_hook(inode_copy_up, src, new); } EXPORT_SYMBOL(security_inode_copy_up); /** * security_inode_copy_up_xattr() - Filter xattrs in an overlayfs copy-up op * @src: union dentry of copy-up file * @name: xattr name * * Filter the xattrs being copied up when a unioned file is copied up from a * lower layer to the union/overlay layer. The caller is responsible for * reading and writing the xattrs, this hook is merely a filter. * * Return: Returns 0 to accept the xattr, -ECANCELED to discard the xattr, * -EOPNOTSUPP if the security module does not know about attribute, * or a negative error code to abort the copy up. */ int security_inode_copy_up_xattr(struct dentry *src, const char *name) { int rc; rc = call_int_hook(inode_copy_up_xattr, src, name); if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr)) return rc; return LSM_RET_DEFAULT(inode_copy_up_xattr); } EXPORT_SYMBOL(security_inode_copy_up_xattr); /** * security_inode_setintegrity() - Set the inode's integrity data * @inode: inode * @type: type of integrity, e.g. hash digest, signature, etc * @value: the integrity value * @size: size of the integrity value * * Register a verified integrity measurement of a inode with LSMs. * LSMs should free the previously saved data if @value is NULL. * * Return: Returns 0 on success, negative values on failure. */ int security_inode_setintegrity(const struct inode *inode, enum lsm_integrity_type type, const void *value, size_t size) { return call_int_hook(inode_setintegrity, inode, type, value, size); } EXPORT_SYMBOL(security_inode_setintegrity); /** * security_kernfs_init_security() - Init LSM context for a kernfs node * @kn_dir: parent kernfs node * @kn: the kernfs node to initialize * * Initialize the security context of a newly created kernfs node based on its * own and its parent's attributes. * * Return: Returns 0 if permission is granted. */ int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn) { return call_int_hook(kernfs_init_security, kn_dir, kn); } /** * security_file_permission() - Check file permissions * @file: file * @mask: requested permissions * * Check file permissions before accessing an open file. 
This hook is called * by various operations that read or write files. A security module can use * this hook to perform additional checking on these operations, e.g. to * revalidate permissions on use to support privilege bracketing or policy * changes. Notice that this hook is used when the actual read/write * operations are performed, whereas the inode_security_ops hook is called when * a file is opened (as well as many other operations). Although this hook can * be used to revalidate permissions for various system call operations that * read or write files, it does not address the revalidation of permissions for * memory-mapped files. Security modules must handle this separately if they * need such revalidation. * * Return: Returns 0 if permission is granted. */ int security_file_permission(struct file *file, int mask) { return call_int_hook(file_permission, file, mask); } /** * security_file_alloc() - Allocate and init a file's LSM blob * @file: the file * * Allocate and attach a security structure to the file->f_security field. The * security field is initialized to NULL when the structure is first created. * * Return: Return 0 if the hook is successful and permission is granted. */ int security_file_alloc(struct file *file) { int rc = lsm_file_alloc(file); if (rc) return rc; rc = call_int_hook(file_alloc_security, file); if (unlikely(rc)) security_file_free(file); return rc; } /** * security_file_release() - Perform actions before releasing the file ref * @file: the file * * Perform actions before releasing the last reference to a file. */ void security_file_release(struct file *file) { call_void_hook(file_release, file); } /** * security_file_free() - Free a file's LSM blob * @file: the file * * Deallocate and free any security structures stored in file->f_security. */ void security_file_free(struct file *file) { void *blob; call_void_hook(file_free_security, file); blob = file->f_security; if (blob) { file->f_security = NULL; kmem_cache_free(lsm_file_cache, blob); } } /** * security_file_ioctl() - Check if an ioctl is allowed * @file: associated file * @cmd: ioctl cmd * @arg: ioctl arguments * * Check permission for an ioctl operation on @file. Note that @arg sometimes * represents a user space pointer; in other cases, it may be a simple integer * value. When @arg represents a user space pointer, it should never be used * by the security module. * * Return: Returns 0 if permission is granted. */ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl, file, cmd, arg); } EXPORT_SYMBOL_GPL(security_file_ioctl); /** * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode * @file: associated file * @cmd: ioctl cmd * @arg: ioctl arguments * * Compat version of security_file_ioctl() that correctly handles 32-bit * processes running on 64-bit kernels. * * Return: Returns 0 if permission is granted. */ int security_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl_compat, file, cmd, arg); } EXPORT_SYMBOL_GPL(security_file_ioctl_compat); static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { /* * Does we have PROT_READ and does the application expect * it to imply PROT_EXEC? If not, nothing to talk about... */ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ) return prot; if (!(current->personality & READ_IMPLIES_EXEC)) return prot; /* * if that's an anonymous mapping, let it. 
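	 *
	 * (READ_IMPLIES_EXEC is a personality(2) flag, set e.g. for legacy
	 * binaries lacking PT_GNU_STACK on some architectures; under it a
	 * plain request such as
	 *
	 *	mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	 *
	 * is checked by the LSMs as if PROT_READ | PROT_EXEC had been asked
	 * for, which is what this helper computes for the mmap_file hook.)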
*/ if (!file) return prot | PROT_EXEC; /* * ditto if it's not on noexec mount, except that on !MMU we need * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case */ if (!path_noexec(&file->f_path)) { #ifndef CONFIG_MMU if (file->f_op->mmap_capabilities) { unsigned caps = file->f_op->mmap_capabilities(file); if (!(caps & NOMMU_MAP_EXEC)) return prot; } #endif return prot | PROT_EXEC; } /* anything on noexec mount won't get PROT_EXEC */ return prot; } /** * security_mmap_file() - Check if mmap'ing a file is allowed * @file: file * @prot: protection applied by the kernel * @flags: flags * * Check permissions for a mmap operation. The @file may be NULL, e.g. if * mapping anonymous memory. * * Return: Returns 0 if permission is granted. */ int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { return call_int_hook(mmap_file, file, prot, mmap_prot(file, prot), flags); } /** * security_mmap_addr() - Check if mmap'ing an address is allowed * @addr: address * * Check permissions for a mmap operation at @addr. * * Return: Returns 0 if permission is granted. */ int security_mmap_addr(unsigned long addr) { return call_int_hook(mmap_addr, addr); } /** * security_file_mprotect() - Check if changing memory protections is allowed * @vma: memory region * @reqprot: application requested protection * @prot: protection applied by the kernel * * Check permissions before changing memory access permissions. * * Return: Returns 0 if permission is granted. */ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return call_int_hook(file_mprotect, vma, reqprot, prot); } /** * security_file_lock() - Check if a file lock is allowed * @file: file * @cmd: lock operation (e.g. F_RDLCK, F_WRLCK) * * Check permission before performing file locking operations. Note the hook * mediates both flock and fcntl style locks. * * Return: Returns 0 if permission is granted. */ int security_file_lock(struct file *file, unsigned int cmd) { return call_int_hook(file_lock, file, cmd); } /** * security_file_fcntl() - Check if fcntl() op is allowed * @file: file * @cmd: fcntl command * @arg: command argument * * Check permission before allowing the file operation specified by @cmd from * being performed on the file @file. Note that @arg sometimes represents a * user space pointer; in other cases, it may be a simple integer value. When * @arg represents a user space pointer, it should never be used by the * security module. * * Return: Returns 0 if permission is granted. */ int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_fcntl, file, cmd, arg); } /** * security_file_set_fowner() - Set the file owner info in the LSM blob * @file: the file * * Save owner security information (typically from current->security) in * file->f_security for later use by the send_sigiotask hook. * * This hook is called with file->f_owner.lock held. * * Return: Returns 0 on success. */ void security_file_set_fowner(struct file *file) { call_void_hook(file_set_fowner, file); } /** * security_file_send_sigiotask() - Check if sending SIGIO/SIGURG is allowed * @tsk: target task * @fown: signal sender * @sig: signal to be sent, SIGIO is sent if 0 * * Check permission for the file owner @fown to send SIGIO or SIGURG to the * process @tsk. Note that this hook is sometimes called from interrupt. 
Note * that the fown_struct, @fown, is never outside the context of a struct file, * so the file structure (and associated security information) can always be * obtained: container_of(fown, struct file, f_owner). * * Return: Returns 0 if permission is granted. */ int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig) { return call_int_hook(file_send_sigiotask, tsk, fown, sig); } /** * security_file_receive() - Check if receiving a file via IPC is allowed * @file: file being received * * This hook allows security modules to control the ability of a process to * receive an open file descriptor via socket IPC. * * Return: Returns 0 if permission is granted. */ int security_file_receive(struct file *file) { return call_int_hook(file_receive, file); } /** * security_file_open() - Save open() time state for late use by the LSM * @file: * * Save open-time permission checking state for later use upon file_permission, * and recheck access if anything has changed since inode_permission. * * We can check if a file is opened for execution (e.g. execve(2) call), either * directly or indirectly (e.g. ELF's ld.so) by checking file->f_flags & * __FMODE_EXEC . * * Return: Returns 0 if permission is granted. */ int security_file_open(struct file *file) { return call_int_hook(file_open, file); } /** * security_file_post_open() - Evaluate a file after it has been opened * @file: the file * @mask: access mask * * Evaluate an opened file and the access mask requested with open(). The hook * is useful for LSMs that require the file content to be available in order to * make decisions. * * Return: Returns 0 if permission is granted. */ int security_file_post_open(struct file *file, int mask) { return call_int_hook(file_post_open, file, mask); } EXPORT_SYMBOL_GPL(security_file_post_open); /** * security_file_truncate() - Check if truncating a file is allowed * @file: file * * Check permission before truncating a file, i.e. using ftruncate. Note that * truncation permission may also be checked based on the path, using the * @path_truncate hook. * * Return: Returns 0 if permission is granted. */ int security_file_truncate(struct file *file) { return call_int_hook(file_truncate, file); } /** * security_task_alloc() - Allocate a task's LSM blob * @task: the task * @clone_flags: flags indicating what is being shared * * Handle allocation of task-related resources. * * Return: Returns a zero on success, negative values on failure. */ int security_task_alloc(struct task_struct *task, u64 clone_flags) { int rc = lsm_task_alloc(task); if (rc) return rc; rc = call_int_hook(task_alloc, task, clone_flags); if (unlikely(rc)) security_task_free(task); return rc; } /** * security_task_free() - Free a task's LSM blob and related resources * @task: task * * Handle release of task-related resources. Note that this can be called from * interrupt context. */ void security_task_free(struct task_struct *task) { call_void_hook(task_free, task); kfree(task->security); task->security = NULL; } /** * security_cred_alloc_blank() - Allocate the min memory to allow cred_transfer * @cred: credentials * @gfp: gfp flags * * Only allocate sufficient memory and attach to @cred such that * cred_transfer() will not get ENOMEM. * * Return: Returns 0 on success, negative values on failure. 
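 *
 * The intended pairing is with security_transfer_creds(): LSM state is
 * reserved here, where allocation may fail, so that the later transfer
 * cannot. Roughly (illustrative sketch of a hypothetical caller, error
 * handling trimmed):
 *
 *	struct cred *new = cred_alloc_blank();	/* runs this hook */
 *
 *	if (new)
 *		security_transfer_creds(new, current_cred());	/* no allocation */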
*/ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) { int rc = lsm_cred_alloc(cred, gfp); if (rc) return rc; rc = call_int_hook(cred_alloc_blank, cred, gfp); if (unlikely(rc)) security_cred_free(cred); return rc; } /** * security_cred_free() - Free the cred's LSM blob and associated resources * @cred: credentials * * Deallocate and clear the cred->security field in a set of credentials. */ void security_cred_free(struct cred *cred) { /* * There is a failure case in prepare_creds() that * may result in a call here with ->security being NULL. */ if (unlikely(cred->security == NULL)) return; call_void_hook(cred_free, cred); kfree(cred->security); cred->security = NULL; } /** * security_prepare_creds() - Prepare a new set of credentials * @new: new credentials * @old: original credentials * @gfp: gfp flags * * Prepare a new set of credentials by copying the data from the old set. * * Return: Returns 0 on success, negative values on failure. */ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp) { int rc = lsm_cred_alloc(new, gfp); if (rc) return rc; rc = call_int_hook(cred_prepare, new, old, gfp); if (unlikely(rc)) security_cred_free(new); return rc; } /** * security_transfer_creds() - Transfer creds * @new: target credentials * @old: original credentials * * Transfer data from original creds to new creds. */ void security_transfer_creds(struct cred *new, const struct cred *old) { call_void_hook(cred_transfer, new, old); } /** * security_cred_getsecid() - Get the secid from a set of credentials * @c: credentials * @secid: secid value * * Retrieve the security identifier of the cred structure @c. In case of * failure, @secid will be set to zero. */ void security_cred_getsecid(const struct cred *c, u32 *secid) { *secid = 0; call_void_hook(cred_getsecid, c, secid); } EXPORT_SYMBOL(security_cred_getsecid); /** * security_cred_getlsmprop() - Get the LSM data from a set of credentials * @c: credentials * @prop: destination for the LSM data * * Retrieve the security data of the cred structure @c. In case of * failure, @prop will be cleared. */ void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(cred_getlsmprop, c, prop); } EXPORT_SYMBOL(security_cred_getlsmprop); /** * security_kernel_act_as() - Set the kernel credentials to act as secid * @new: credentials * @secid: secid * * Set the credentials for a kernel service to act as (subjective context). * The current task must be the one that nominated @secid. * * Return: Returns 0 if successful. */ int security_kernel_act_as(struct cred *new, u32 secid) { return call_int_hook(kernel_act_as, new, secid); } /** * security_kernel_create_files_as() - Set file creation context using an inode * @new: target credentials * @inode: reference inode * * Set the file creation context in a set of credentials to be the same as the * objective context of the specified inode. The current task must be the one * that nominated @inode. * * Return: Returns 0 if successful. */ int security_kernel_create_files_as(struct cred *new, struct inode *inode) { return call_int_hook(kernel_create_files_as, new, inode); } /** * security_kernel_module_request() - Check if loading a module is allowed * @kmod_name: module name * * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. * * Return: Returns 0 if successful. 
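 *
 * This is the check underneath request_module(); for example a filesystem
 * lookup doing
 *
 *	request_module("fs-%s", name);
 *
 * passes the expanded "fs-<name>" string here as @kmod_name before the
 * modprobe usermode helper is invoked.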
*/ int security_kernel_module_request(char *kmod_name) { return call_int_hook(kernel_module_request, kmod_name); } /** * security_kernel_read_file() - Read a file specified by userspace * @file: file * @id: file identifier * @contents: true if security_kernel_post_read_file() will be called * * Read a file specified by userspace. * * Return: Returns 0 if permission is granted. */ int security_kernel_read_file(struct file *file, enum kernel_read_file_id id, bool contents) { return call_int_hook(kernel_read_file, file, id, contents); } EXPORT_SYMBOL_GPL(security_kernel_read_file); /** * security_kernel_post_read_file() - Read a file specified by userspace * @file: file * @buf: file contents * @size: size of file contents * @id: file identifier * * Read a file specified by userspace. This must be paired with a prior call * to security_kernel_read_file() that indicated this hook would also be * called; see security_kernel_read_file() for more information. * * Return: Returns 0 if permission is granted. */ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) { return call_int_hook(kernel_post_read_file, file, buf, size, id); } EXPORT_SYMBOL_GPL(security_kernel_post_read_file); /** * security_kernel_load_data() - Load data provided by userspace * @id: data identifier * @contents: true if security_kernel_post_load_data() will be called * * Load data provided by userspace. * * Return: Returns 0 if permission is granted. */ int security_kernel_load_data(enum kernel_load_data_id id, bool contents) { return call_int_hook(kernel_load_data, id, contents); } EXPORT_SYMBOL_GPL(security_kernel_load_data); /** * security_kernel_post_load_data() - Load userspace data from a non-file source * @buf: data * @size: size of data * @id: data identifier * @description: text description of data, specific to the id value * * Load data provided by a non-file source (usually userspace buffer). This * must be paired with a prior security_kernel_load_data() call that indicated * this hook would also be called; see security_kernel_load_data() for more * information. * * Return: Returns 0 if permission is granted. */ int security_kernel_post_load_data(char *buf, loff_t size, enum kernel_load_data_id id, char *description) { return call_int_hook(kernel_post_load_data, buf, size, id, description); } EXPORT_SYMBOL_GPL(security_kernel_post_load_data); /** * security_task_fix_setuid() - Update LSM with new user id attributes * @new: updated credentials * @old: credentials being replaced * @flags: LSM_SETID_* flag values * * Update the module's state after setting one or more of the user identity * attributes of the current process. The @flags parameter indicates which of * the set*uid system calls invoked this hook. @new is the set of * credentials that will be installed. Modifications should be made to this * rather than to @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { return call_int_hook(task_fix_setuid, new, old, flags); } /** * security_task_fix_setgid() - Update LSM with new group id attributes * @new: updated credentials * @old: credentials being replaced * @flags: LSM_SETID_* flag value * * Update the module's state after setting one or more of the group identity * attributes of the current process. The @flags parameter indicates which of * the set*gid system calls invoked this hook. @new is the set of credentials * that will be installed.
Modifications should be made to this rather than to * @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setgid(struct cred *new, const struct cred *old, int flags) { return call_int_hook(task_fix_setgid, new, old, flags); } /** * security_task_fix_setgroups() - Update LSM with new supplementary groups * @new: updated credentials * @old: credentials being replaced * * Update the module's state after setting the supplementary group identity * attributes of the current process. @new is the set of credentials that will * be installed. Modifications should be made to this rather than to * @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setgroups(struct cred *new, const struct cred *old) { return call_int_hook(task_fix_setgroups, new, old); } /** * security_task_setpgid() - Check if setting the pgid is allowed * @p: task being modified * @pgid: new pgid * * Check permission before setting the process group identifier of the process * @p to @pgid. * * Return: Returns 0 if permission is granted. */ int security_task_setpgid(struct task_struct *p, pid_t pgid) { return call_int_hook(task_setpgid, p, pgid); } /** * security_task_getpgid() - Check if getting the pgid is allowed * @p: task * * Check permission before getting the process group identifier of the process * @p. * * Return: Returns 0 if permission is granted. */ int security_task_getpgid(struct task_struct *p) { return call_int_hook(task_getpgid, p); } /** * security_task_getsid() - Check if getting the session id is allowed * @p: task * * Check permission before getting the session identifier of the process @p. * * Return: Returns 0 if permission is granted. */ int security_task_getsid(struct task_struct *p) { return call_int_hook(task_getsid, p); } /** * security_current_getlsmprop_subj() - Current task's subjective LSM data * @prop: lsm specific information * * Retrieve the subjective security identifier of the current task and return * it in @prop. */ void security_current_getlsmprop_subj(struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(current_getlsmprop_subj, prop); } EXPORT_SYMBOL(security_current_getlsmprop_subj); /** * security_task_getlsmprop_obj() - Get a task's objective LSM data * @p: target task * @prop: lsm specific information * * Retrieve the objective security identifier of the task_struct in @p and * return it in @prop. */ void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(task_getlsmprop_obj, p, prop); } EXPORT_SYMBOL(security_task_getlsmprop_obj); /** * security_task_setnice() - Check if setting a task's nice value is allowed * @p: target task * @nice: nice value * * Check permission before setting the nice value of @p to @nice. * * Return: Returns 0 if permission is granted. */ int security_task_setnice(struct task_struct *p, int nice) { return call_int_hook(task_setnice, p, nice); } /** * security_task_setioprio() - Check if setting a task's ioprio is allowed * @p: target task * @ioprio: ioprio value * * Check permission before setting the ioprio value of @p to @ioprio. * * Return: Returns 0 if permission is granted. */ int security_task_setioprio(struct task_struct *p, int ioprio) { return call_int_hook(task_setioprio, p, ioprio); } /** * security_task_getioprio() - Check if getting a task's ioprio is allowed * @p: task * * Check permission before getting the ioprio value of @p. * * Return: Returns 0 if permission is granted. 
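 *
 * Together with security_task_setioprio() this mirrors the ioprio_get(2)
 * and ioprio_set(2) syscalls; for example
 *
 *	ioprio_get(IOPRIO_WHO_PROCESS, pid);
 *
 * reaches this hook for the target task.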
*/ int security_task_getioprio(struct task_struct *p) { return call_int_hook(task_getioprio, p); } /** * security_task_prlimit() - Check if get/setting resources limits is allowed * @cred: current task credentials * @tcred: target task credentials * @flags: LSM_PRLIMIT_* flag bits indicating a get/set/both * * Check permission before getting and/or setting the resource limits of * another task. * * Return: Returns 0 if permission is granted. */ int security_task_prlimit(const struct cred *cred, const struct cred *tcred, unsigned int flags) { return call_int_hook(task_prlimit, cred, tcred, flags); } /** * security_task_setrlimit() - Check if setting a new rlimit value is allowed * @p: target task's group leader * @resource: resource whose limit is being set * @new_rlim: new resource limit * * Check permission before setting the resource limits of process @p for * @resource to @new_rlim. The old resource limit values can be examined by * dereferencing (p->signal->rlim + resource). * * Return: Returns 0 if permission is granted. */ int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) { return call_int_hook(task_setrlimit, p, resource, new_rlim); } /** * security_task_setscheduler() - Check if setting sched policy/param is allowed * @p: target task * * Check permission before setting scheduling policy and/or parameters of * process @p. * * Return: Returns 0 if permission is granted. */ int security_task_setscheduler(struct task_struct *p) { return call_int_hook(task_setscheduler, p); } /** * security_task_getscheduler() - Check if getting scheduling info is allowed * @p: target task * * Check permission before obtaining scheduling information for process @p. * * Return: Returns 0 if permission is granted. */ int security_task_getscheduler(struct task_struct *p) { return call_int_hook(task_getscheduler, p); } /** * security_task_movememory() - Check if moving memory is allowed * @p: task * * Check permission before moving memory owned by process @p. * * Return: Returns 0 if permission is granted. */ int security_task_movememory(struct task_struct *p) { return call_int_hook(task_movememory, p); } /** * security_task_kill() - Check if sending a signal is allowed * @p: target process * @info: signal information * @sig: signal value * @cred: credentials of the signal sender, NULL if @current * * Check permission before sending signal @sig to @p. @info can be NULL, the * constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming from * the kernel and should typically be permitted. SIGIO signals are handled * separately by the send_sigiotask hook in file_security_ops. * * Return: Returns 0 if permission is granted. */ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { return call_int_hook(task_kill, p, info, sig, cred); } /** * security_task_prctl() - Check if a prctl op is allowed * @option: operation * @arg2: argument * @arg3: argument * @arg4: argument * @arg5: argument * * Check permission before performing a process control operation on the * current process. * * Return: Return -ENOSYS if no-one wanted to handle this op, any other value * to cause prctl() to return immediately with that value. 
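 *
 * For example, Yama implements PR_SET_PTRACER through this hook, so a
 * process may run
 *
 *	prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0);
 *
 * without the core prctl() code knowing anything about the option; LSMs
 * that do not recognise @option keep returning -ENOSYS (the default).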
*/ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int thisrc; int rc = LSM_RET_DEFAULT(task_prctl); struct lsm_static_call *scall; lsm_for_each_hook(scall, task_prctl) { thisrc = scall->hl->hook.task_prctl(option, arg2, arg3, arg4, arg5); if (thisrc != LSM_RET_DEFAULT(task_prctl)) { rc = thisrc; if (thisrc != 0) break; } } return rc; } /** * security_task_to_inode() - Set the security attributes of a task's inode * @p: task * @inode: inode * * Set the security attributes for an inode based on an associated task's * security attributes, e.g. for /proc/pid inodes. */ void security_task_to_inode(struct task_struct *p, struct inode *inode) { call_void_hook(task_to_inode, p, inode); } /** * security_create_user_ns() - Check if creating a new userns is allowed * @cred: prepared creds * * Check permission prior to creating a new user namespace. * * Return: Returns 0 if successful, otherwise < 0 error code. */ int security_create_user_ns(const struct cred *cred) { return call_int_hook(userns_create, cred); } /** * security_ipc_permission() - Check if sysv ipc access is allowed * @ipcp: ipc permission structure * @flag: requested permissions * * Check permissions for access to IPC. * * Return: Returns 0 if permission is granted. */ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag) { return call_int_hook(ipc_permission, ipcp, flag); } /** * security_ipc_getlsmprop() - Get the sysv ipc object LSM data * @ipcp: ipc permission structure * @prop: pointer to lsm information * * Get the lsm information associated with the ipc object. */ void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(ipc_getlsmprop, ipcp, prop); } /** * security_msg_msg_alloc() - Allocate a sysv ipc message LSM blob * @msg: message structure * * Allocate and attach a security structure to the msg->security field. The * security field is initialized to NULL when the structure is first created. * * Return: Return 0 if operation was successful and permission is granted. */ int security_msg_msg_alloc(struct msg_msg *msg) { int rc = lsm_msg_msg_alloc(msg); if (unlikely(rc)) return rc; rc = call_int_hook(msg_msg_alloc_security, msg); if (unlikely(rc)) security_msg_msg_free(msg); return rc; } /** * security_msg_msg_free() - Free a sysv ipc message LSM blob * @msg: message structure * * Deallocate the security structure for this message. */ void security_msg_msg_free(struct msg_msg *msg) { call_void_hook(msg_msg_free_security, msg); kfree(msg->security); msg->security = NULL; } /** * security_msg_queue_alloc() - Allocate a sysv ipc msg queue LSM blob * @msq: sysv ipc permission structure * * Allocate and attach a security structure to @msg. The security field is * initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_msg_queue_alloc(struct kern_ipc_perm *msq) { int rc = lsm_ipc_alloc(msq); if (unlikely(rc)) return rc; rc = call_int_hook(msg_queue_alloc_security, msq); if (unlikely(rc)) security_msg_queue_free(msq); return rc; } /** * security_msg_queue_free() - Free a sysv ipc msg queue LSM blob * @msq: sysv ipc permission structure * * Deallocate security field @perm->security for the message queue. 
*/ void security_msg_queue_free(struct kern_ipc_perm *msq) { call_void_hook(msg_queue_free_security, msq); kfree(msq->security); msq->security = NULL; } /** * security_msg_queue_associate() - Check if a msg queue operation is allowed * @msq: sysv ipc permission structure * @msqflg: operation flags * * Check permission when a message queue is requested through the msgget system * call. This hook is only called when returning the message queue identifier * for an existing message queue, not when a new message queue is created. * * Return: Return 0 if permission is granted. */ int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg) { return call_int_hook(msg_queue_associate, msq, msqflg); } /** * security_msg_queue_msgctl() - Check if a msg queue operation is allowed * @msq: sysv ipc permission structure * @cmd: operation * * Check permission when a message control operation specified by @cmd is to be * performed on the message queue with permissions. * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) { return call_int_hook(msg_queue_msgctl, msq, cmd); } /** * security_msg_queue_msgsnd() - Check if sending a sysv ipc message is allowed * @msq: sysv ipc permission structure * @msg: message * @msqflg: operation flags * * Check permission before a message, @msg, is enqueued on the message queue * with permissions specified in @msq. * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg) { return call_int_hook(msg_queue_msgsnd, msq, msg, msqflg); } /** * security_msg_queue_msgrcv() - Check if receiving a sysv ipc msg is allowed * @msq: sysv ipc permission structure * @msg: message * @target: target task * @type: type of message requested * @mode: operation flags * * Check permission before a message, @msg, is removed from the message queue. * The @target task structure contains a pointer to the process that will be * receiving the message (not equal to the current process when inline receives * are being performed). * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return call_int_hook(msg_queue_msgrcv, msq, msg, target, type, mode); } /** * security_shm_alloc() - Allocate a sysv shm LSM blob * @shp: sysv ipc permission structure * * Allocate and attach a security structure to the @shp security field. The * security field is initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_shm_alloc(struct kern_ipc_perm *shp) { int rc = lsm_ipc_alloc(shp); if (unlikely(rc)) return rc; rc = call_int_hook(shm_alloc_security, shp); if (unlikely(rc)) security_shm_free(shp); return rc; } /** * security_shm_free() - Free a sysv shm LSM blob * @shp: sysv ipc permission structure * * Deallocate the security structure @perm->security for the memory segment. */ void security_shm_free(struct kern_ipc_perm *shp) { call_void_hook(shm_free_security, shp); kfree(shp->security); shp->security = NULL; } /** * security_shm_associate() - Check if a sysv shm operation is allowed * @shp: sysv ipc permission structure * @shmflg: operation flags * * Check permission when a shared memory region is requested through the shmget * system call. 
This hook is only called when returning the shared memory * region identifier for an existing region, not when a new shared memory * region is created. * * Return: Returns 0 if permission is granted. */ int security_shm_associate(struct kern_ipc_perm *shp, int shmflg) { return call_int_hook(shm_associate, shp, shmflg); } /** * security_shm_shmctl() - Check if a sysv shm operation is allowed * @shp: sysv ipc permission structure * @cmd: operation * * Check permission when a shared memory control operation specified by @cmd is * to be performed on the shared memory region with permissions in @shp. * * Return: Return 0 if permission is granted. */ int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) { return call_int_hook(shm_shmctl, shp, cmd); } /** * security_shm_shmat() - Check if a sysv shm attach operation is allowed * @shp: sysv ipc permission structure * @shmaddr: address of memory region to attach * @shmflg: operation flags * * Check permissions prior to allowing the shmat system call to attach the * shared memory segment with permissions @shp to the data segment of the * calling process. The attaching address is specified by @shmaddr. * * Return: Returns 0 if permission is granted. */ int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg) { return call_int_hook(shm_shmat, shp, shmaddr, shmflg); } /** * security_sem_alloc() - Allocate a sysv semaphore LSM blob * @sma: sysv ipc permission structure * * Allocate and attach a security structure to the @sma security field. The * security field is initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_sem_alloc(struct kern_ipc_perm *sma) { int rc = lsm_ipc_alloc(sma); if (unlikely(rc)) return rc; rc = call_int_hook(sem_alloc_security, sma); if (unlikely(rc)) security_sem_free(sma); return rc; } /** * security_sem_free() - Free a sysv semaphore LSM blob * @sma: sysv ipc permission structure * * Deallocate security structure @sma->security for the semaphore. */ void security_sem_free(struct kern_ipc_perm *sma) { call_void_hook(sem_free_security, sma); kfree(sma->security); sma->security = NULL; } /** * security_sem_associate() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @semflg: operation flags * * Check permission when a semaphore is requested through the semget system * call. This hook is only called when returning the semaphore identifier for * an existing semaphore, not when a new one must be created. * * Return: Returns 0 if permission is granted. */ int security_sem_associate(struct kern_ipc_perm *sma, int semflg) { return call_int_hook(sem_associate, sma, semflg); } /** * security_sem_semctl() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @cmd: operation * * Check permission when a semaphore operation specified by @cmd is to be * performed on the semaphore. * * Return: Returns 0 if permission is granted. */ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) { return call_int_hook(sem_semctl, sma, cmd); } /** * security_sem_semop() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @sops: operations to perform * @nsops: number of operations * @alter: flag indicating changes will be made * * Check permissions before performing operations on members of the semaphore * set. If the @alter flag is nonzero, the semaphore set may be modified. 
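 *
 * For example, a userspace semop(2) call such as
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *
 *	semop(semid, &op, 1);
 *
 * arrives here with @nsops == 1 and a nonzero @alter, since decrementing
 * the semaphore changes its value.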
* * Return: Returns 0 if permission is granted. */ int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter) { return call_int_hook(sem_semop, sma, sops, nsops, alter); } /** * security_d_instantiate() - Populate an inode's LSM state based on a dentry * @dentry: dentry * @inode: inode * * Fill in @inode security information for a @dentry if allowed. */ void security_d_instantiate(struct dentry *dentry, struct inode *inode) { if (unlikely(inode && IS_PRIVATE(inode))) return; call_void_hook(d_instantiate, dentry, inode); } EXPORT_SYMBOL(security_d_instantiate); /* * Please keep this in sync with it's counterpart in security/lsm_syscalls.c */ /** * security_getselfattr - Read an LSM attribute of the current process. * @attr: which attribute to return * @uctx: the user-space destination for the information, or NULL * @size: pointer to the size of space available to receive the data * @flags: special handling options. LSM_FLAG_SINGLE indicates that only * attributes associated with the LSM identified in the passed @ctx be * reported. * * A NULL value for @uctx can be used to get both the number of attributes * and the size of the data. * * Returns the number of attributes found on success, negative value * on error. @size is reset to the total size of the data. * If @size is insufficient to contain the data -E2BIG is returned. */ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx, u32 __user *size, u32 flags) { struct lsm_static_call *scall; struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, }; u8 __user *base = (u8 __user *)uctx; u32 entrysize; u32 total = 0; u32 left; bool toobig = false; bool single = false; int count = 0; int rc; if (attr == LSM_ATTR_UNDEF) return -EINVAL; if (size == NULL) return -EINVAL; if (get_user(left, size)) return -EFAULT; if (flags) { /* * Only flag supported is LSM_FLAG_SINGLE */ if (flags != LSM_FLAG_SINGLE || !uctx) return -EINVAL; if (copy_from_user(&lctx, uctx, sizeof(lctx))) return -EFAULT; /* * If the LSM ID isn't specified it is an error. */ if (lctx.id == LSM_ID_UNDEF) return -EINVAL; single = true; } /* * In the usual case gather all the data from the LSMs. * In the single case only get the data from the LSM specified. */ lsm_for_each_hook(scall, getselfattr) { if (single && lctx.id != scall->hl->lsmid->id) continue; entrysize = left; if (base) uctx = (struct lsm_ctx __user *)(base + total); rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags); if (rc == -EOPNOTSUPP) continue; if (rc == -E2BIG) { rc = 0; left = 0; toobig = true; } else if (rc < 0) return rc; else left -= entrysize; total += entrysize; count += rc; if (single) break; } if (put_user(total, size)) return -EFAULT; if (toobig) return -E2BIG; if (count == 0) return LSM_RET_DEFAULT(getselfattr); return count; } /* * Please keep this in sync with it's counterpart in security/lsm_syscalls.c */ /** * security_setselfattr - Set an LSM attribute on the current process. * @attr: which attribute to set * @uctx: the user-space source for the information * @size: the size of the data * @flags: reserved for future use, must be 0 * * Set an LSM attribute for the current process. The LSM, attribute * and new value are included in @uctx. * * Returns 0 on success, -EINVAL if the input is inconsistent, -EFAULT * if the user buffer is inaccessible, E2BIG if size is too big, or an * LSM specific failure. 
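 *
 * Userspace reaches this through the lsm_set_self_attr(2) syscall with a
 * single struct lsm_ctx identifying the LSM and the new value, roughly
 * (illustrative sketch, assuming an already populated ctx buffer):
 *
 *	struct lsm_ctx *ctx = ...;	/* ctx->id, ctx->ctx_len, ctx->ctx[] */
 *
 *	syscall(__NR_lsm_set_self_attr, LSM_ATTR_CURRENT, ctx, ctx->len, 0);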
*/ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx, u32 size, u32 flags) { struct lsm_static_call *scall; struct lsm_ctx *lctx; int rc = LSM_RET_DEFAULT(setselfattr); u64 required_len; if (flags) return -EINVAL; if (size < sizeof(*lctx)) return -EINVAL; if (size > PAGE_SIZE) return -E2BIG; lctx = memdup_user(uctx, size); if (IS_ERR(lctx)) return PTR_ERR(lctx); if (size < lctx->len || check_add_overflow(sizeof(*lctx), lctx->ctx_len, &required_len) || lctx->len < required_len) { rc = -EINVAL; goto free_out; } lsm_for_each_hook(scall, setselfattr) if ((scall->hl->lsmid->id) == lctx->id) { rc = scall->hl->hook.setselfattr(attr, lctx, size, flags); break; } free_out: kfree(lctx); return rc; } /** * security_getprocattr() - Read an attribute for a task * @p: the task * @lsmid: LSM identification * @name: attribute name * @value: attribute value * * Read attribute @name for task @p and store it into @value if allowed. * * Return: Returns the length of @value on success, a negative value otherwise. */ int security_getprocattr(struct task_struct *p, int lsmid, const char *name, char **value) { struct lsm_static_call *scall; lsm_for_each_hook(scall, getprocattr) { if (lsmid != 0 && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.getprocattr(p, name, value); } return LSM_RET_DEFAULT(getprocattr); } /** * security_setprocattr() - Set an attribute for a task * @lsmid: LSM identification * @name: attribute name * @value: attribute value * @size: attribute value size * * Write (set) the current task's attribute @name to @value, size @size if * allowed. * * Return: Returns bytes written on success, a negative value otherwise. */ int security_setprocattr(int lsmid, const char *name, void *value, size_t size) { struct lsm_static_call *scall; lsm_for_each_hook(scall, setprocattr) { if (lsmid != 0 && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.setprocattr(name, value, size); } return LSM_RET_DEFAULT(setprocattr); } /** * security_ismaclabel() - Check if the named attribute is a MAC label * @name: full extended attribute name * * Check if the extended attribute specified by @name represents a MAC label. * * Return: Returns 1 if name is a MAC attribute otherwise returns 0. */ int security_ismaclabel(const char *name) { return call_int_hook(ismaclabel, name); } EXPORT_SYMBOL(security_ismaclabel); /** * security_secid_to_secctx() - Convert a secid to a secctx * @secid: secid * @cp: the LSM context * * Convert secid to security context. If @cp is NULL the length of the * result will be returned, but no data will be returned. This * does mean that the length could change between calls to check the length and * the next call which actually allocates and returns the data. * * Return: Return length of data on success, error on failure. */ int security_secid_to_secctx(u32 secid, struct lsm_context *cp) { return call_int_hook(secid_to_secctx, secid, cp); } EXPORT_SYMBOL(security_secid_to_secctx); /** * security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx * @prop: lsm specific information * @cp: the LSM context * @lsmid: which security module to report * * Convert a @prop entry to security context. If @cp is NULL the * length of the result will be returned. This does mean that the * length could change between calls to check the length and the * next call which actually allocates and returns the @cp. * * @lsmid identifies which LSM should supply the context. * A value of LSM_ID_UNDEF indicates that the first LSM suppling * the hook should be used. 
This is used in cases where the * ID of the supplying LSM is unambiguous. * * Return: Return length of data on success, error on failure. */ int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp, int lsmid) { struct lsm_static_call *scall; lsm_for_each_hook(scall, lsmprop_to_secctx) { if (lsmid != LSM_ID_UNDEF && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.lsmprop_to_secctx(prop, cp); } return LSM_RET_DEFAULT(lsmprop_to_secctx); } EXPORT_SYMBOL(security_lsmprop_to_secctx); /** * security_secctx_to_secid() - Convert a secctx to a secid * @secdata: secctx * @seclen: length of secctx * @secid: secid * * Convert security context to secid. * * Return: Returns 0 on success, error on failure. */ int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { *secid = 0; return call_int_hook(secctx_to_secid, secdata, seclen, secid); } EXPORT_SYMBOL(security_secctx_to_secid); /** * security_release_secctx() - Free a secctx buffer * @cp: the security context * * Release the security context. */ void security_release_secctx(struct lsm_context *cp) { call_void_hook(release_secctx, cp); memset(cp, 0, sizeof(*cp)); } EXPORT_SYMBOL(security_release_secctx); /** * security_inode_invalidate_secctx() - Invalidate an inode's security label * @inode: inode * * Notify the security module that it must revalidate the security context of * an inode. */ void security_inode_invalidate_secctx(struct inode *inode) { call_void_hook(inode_invalidate_secctx, inode); } EXPORT_SYMBOL(security_inode_invalidate_secctx); /** * security_inode_notifysecctx() - Notify the LSM of an inode's security label * @inode: inode * @ctx: secctx * @ctxlen: length of secctx * * Notify the security module of what the security context of an inode should * be. Initializes the incore security context managed by the security module * for this inode. Example usage: NFS client invokes this hook to initialize * the security context in its incore inode to the value provided by the server * for the file when the server returned the file's attributes to the client. * Must be called with inode->i_mutex locked. * * Return: Returns 0 on success, error on failure. */ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return call_int_hook(inode_notifysecctx, inode, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_notifysecctx); /** * security_inode_setsecctx() - Change the security label of an inode * @dentry: inode * @ctx: secctx * @ctxlen: length of secctx * * Change the security context of an inode. Updates the incore security * context managed by the security module and invokes the fs code as needed * (via __vfs_setxattr_noperm) to update any backing xattrs that represent the * context. Example usage: NFS server invokes this hook to change the security * context in its incore inode and on the backing filesystem to a value * provided by the client on a SETATTR operation. Must be called with * inode->i_mutex locked. * * Return: Returns 0 on success, error on failure. */ int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) { return call_int_hook(inode_setsecctx, dentry, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_setsecctx); /** * security_inode_getsecctx() - Get the security label of an inode * @inode: inode * @cp: security context * * On success, returns 0 and fills out @cp with the security context * for the given @inode. * * Return: Returns 0 on success, error on failure. 
*/ int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { memset(cp, 0, sizeof(*cp)); return call_int_hook(inode_getsecctx, inode, cp); } EXPORT_SYMBOL(security_inode_getsecctx); #ifdef CONFIG_WATCH_QUEUE /** * security_post_notification() - Check if a watch notification can be posted * @w_cred: credentials of the task that set the watch * @cred: credentials of the task which triggered the watch * @n: the notification * * Check to see if a watch notification can be posted to a particular queue. * * Return: Returns 0 if permission is granted. */ int security_post_notification(const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) { return call_int_hook(post_notification, w_cred, cred, n); } #endif /* CONFIG_WATCH_QUEUE */ #ifdef CONFIG_KEY_NOTIFICATIONS /** * security_watch_key() - Check if a task is allowed to watch for key events * @key: the key to watch * * Check to see if a process is allowed to watch for event notifications from * a key or keyring. * * Return: Returns 0 if permission is granted. */ int security_watch_key(struct key *key) { return call_int_hook(watch_key, key); } #endif /* CONFIG_KEY_NOTIFICATIONS */ #ifdef CONFIG_SECURITY_NETWORK /** * security_netlink_send() - Save info and check if netlink sending is allowed * @sk: sending socket * @skb: netlink message * * Save security information for a netlink message so that permission checking * can be performed when the message is processed. The security information * can be saved using the eff_cap field of the netlink_skb_parms structure. * Also may be used to provide fine grained control over message transmission. * * Return: Returns 0 if the information was successfully saved and message is * allowed to be transmitted. */ int security_netlink_send(struct sock *sk, struct sk_buff *skb) { return call_int_hook(netlink_send, sk, skb); } /** * security_unix_stream_connect() - Check if a AF_UNIX stream is allowed * @sock: originating sock * @other: peer sock * @newsk: new sock * * Check permissions before establishing a Unix domain stream connection * between @sock and @other. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Return: Returns 0 if permission is granted. */ int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return call_int_hook(unix_stream_connect, sock, other, newsk); } EXPORT_SYMBOL(security_unix_stream_connect); /** * security_unix_may_send() - Check if AF_UNIX socket can send datagrams * @sock: originating sock * @other: peer sock * * Check permissions before connecting or sending datagrams from @sock to * @other. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. 
Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Return: Returns 0 if permission is granted. */ int security_unix_may_send(struct socket *sock, struct socket *other) { return call_int_hook(unix_may_send, sock, other); } EXPORT_SYMBOL(security_unix_may_send); /** * security_socket_create() - Check if creating a new socket is allowed * @family: protocol family * @type: communications type * @protocol: requested protocol * @kern: set to 1 if a kernel socket is requested * * Check permissions prior to creating a new socket. * * Return: Returns 0 if permission is granted. */ int security_socket_create(int family, int type, int protocol, int kern) { return call_int_hook(socket_create, family, type, protocol, kern); } /** * security_socket_post_create() - Initialize a newly created socket * @sock: socket * @family: protocol family * @type: communications type * @protocol: requested protocol * @kern: set to 1 if a kernel socket is requested * * This hook allows a module to update or allocate a per-socket security * structure. Note that the security field was not added directly to the socket * structure, but rather, the socket security information is stored in the * associated inode. Typically, the inode alloc_security hook will allocate * and attach security information to SOCK_INODE(sock)->i_security. This hook * may be used to update the SOCK_INODE(sock)->i_security field with additional * information that wasn't available when the inode was allocated. * * Return: Returns 0 if permission is granted. */ int security_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { return call_int_hook(socket_post_create, sock, family, type, protocol, kern); } /** * security_socket_socketpair() - Check if creating a socketpair is allowed * @socka: first socket * @sockb: second socket * * Check permissions before creating a fresh pair of sockets. * * Return: Returns 0 if permission is granted and the connection was * established. */ int security_socket_socketpair(struct socket *socka, struct socket *sockb) { return call_int_hook(socket_socketpair, socka, sockb); } EXPORT_SYMBOL(security_socket_socketpair); /** * security_socket_bind() - Check if a socket bind operation is allowed * @sock: socket * @address: requested bind address * @addrlen: length of address * * Check permission before socket protocol layer bind operation is performed * and the socket @sock is bound to the address specified in the @address * parameter. * * Return: Returns 0 if permission is granted. */ int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { return call_int_hook(socket_bind, sock, address, addrlen); } /** * security_socket_connect() - Check if a socket connect operation is allowed * @sock: socket * @address: address of remote connection point * @addrlen: length of address * * Check permission before socket protocol layer connect operation attempts to * connect socket @sock to a remote address, @address. * * Return: Returns 0 if permission is granted. 
*/ int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen) { return call_int_hook(socket_connect, sock, address, addrlen); } /** * security_socket_listen() - Check if a socket is allowed to listen * @sock: socket * @backlog: connection queue size * * Check permission before socket protocol layer listen operation. * * Return: Returns 0 if permission is granted. */ int security_socket_listen(struct socket *sock, int backlog) { return call_int_hook(socket_listen, sock, backlog); } /** * security_socket_accept() - Check if a socket is allowed to accept connections * @sock: listening socket * @newsock: newly creation connection socket * * Check permission before accepting a new connection. Note that the new * socket, @newsock, has been created and some information copied to it, but * the accept operation has not actually been performed. * * Return: Returns 0 if permission is granted. */ int security_socket_accept(struct socket *sock, struct socket *newsock) { return call_int_hook(socket_accept, sock, newsock); } /** * security_socket_sendmsg() - Check if sending a message is allowed * @sock: sending socket * @msg: message to send * @size: size of message * * Check permission before transmitting a message to another socket. * * Return: Returns 0 if permission is granted. */ int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { return call_int_hook(socket_sendmsg, sock, msg, size); } /** * security_socket_recvmsg() - Check if receiving a message is allowed * @sock: receiving socket * @msg: message to receive * @size: size of message * @flags: operational flags * * Check permission before receiving a message from a socket. * * Return: Returns 0 if permission is granted. */ int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, int size, int flags) { return call_int_hook(socket_recvmsg, sock, msg, size, flags); } /** * security_socket_getsockname() - Check if reading the socket addr is allowed * @sock: socket * * Check permission before reading the local address (name) of the socket * object. * * Return: Returns 0 if permission is granted. */ int security_socket_getsockname(struct socket *sock) { return call_int_hook(socket_getsockname, sock); } /** * security_socket_getpeername() - Check if reading the peer's addr is allowed * @sock: socket * * Check permission before the remote address (name) of a socket object. * * Return: Returns 0 if permission is granted. */ int security_socket_getpeername(struct socket *sock) { return call_int_hook(socket_getpeername, sock); } /** * security_socket_getsockopt() - Check if reading a socket option is allowed * @sock: socket * @level: option's protocol level * @optname: option name * * Check permissions before retrieving the options associated with socket * @sock. * * Return: Returns 0 if permission is granted. */ int security_socket_getsockopt(struct socket *sock, int level, int optname) { return call_int_hook(socket_getsockopt, sock, level, optname); } /** * security_socket_setsockopt() - Check if setting a socket option is allowed * @sock: socket * @level: option's protocol level * @optname: option name * * Check permissions before setting the options associated with socket @sock. * * Return: Returns 0 if permission is granted. 
*/ int security_socket_setsockopt(struct socket *sock, int level, int optname) { return call_int_hook(socket_setsockopt, sock, level, optname); } /** * security_socket_shutdown() - Checks if shutting down the socket is allowed * @sock: socket * @how: flag indicating how sends and receives are handled * * Checks permission before all or part of a connection on the socket @sock is * shut down. * * Return: Returns 0 if permission is granted. */ int security_socket_shutdown(struct socket *sock, int how) { return call_int_hook(socket_shutdown, sock, how); } /** * security_sock_rcv_skb() - Check if an incoming network packet is allowed * @sk: destination sock * @skb: incoming packet * * Check permissions on incoming network packets. This hook is distinct from * Netfilter's IP input hooks since it is the first time that the incoming * sk_buff @skb has been associated with a particular socket, @sk. Must not * sleep inside this hook because some callers hold spinlocks. * * Return: Returns 0 if permission is granted. */ int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { return call_int_hook(socket_sock_rcv_skb, sk, skb); } EXPORT_SYMBOL(security_sock_rcv_skb); /** * security_socket_getpeersec_stream() - Get the remote peer label * @sock: socket * @optval: destination buffer * @optlen: size of peer label copied into the buffer * @len: maximum size of the destination buffer * * This hook allows the security module to provide peer socket security state * for unix or connected tcp sockets to userspace via getsockopt SO_GETPEERSEC. * For tcp sockets this can be meaningful if the socket is associated with an * ipsec SA. * * Return: Returns 0 if all is well, otherwise, typical getsockopt return * values. */ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) { return call_int_hook(socket_getpeersec_stream, sock, optval, optlen, len); } /** * security_socket_getpeersec_dgram() - Get the remote peer label * @sock: socket * @skb: datagram packet * @secid: remote peer label secid * * This hook allows the security module to provide peer socket security state * for udp sockets on a per-packet basis to userspace via getsockopt * SO_GETPEERSEC. The application must first have indicated the IP_PASSSEC * option via getsockopt. It can then retrieve the security state returned by * this hook for a packet via the SCM_SECURITY ancillary message type. * * Return: Returns 0 on success, error on failure. */ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return call_int_hook(socket_getpeersec_dgram, sock, skb, secid); } EXPORT_SYMBOL(security_socket_getpeersec_dgram); /** * lsm_sock_alloc - allocate a composite sock blob * @sock: the sock that needs a blob * @gfp: allocation mode * * Allocate the sock blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_sock_alloc(struct sock *sock, gfp_t gfp) { return lsm_blob_alloc(&sock->sk_security, blob_sizes.lbs_sock, gfp); } /** * security_sk_alloc() - Allocate and initialize a sock's LSM blob * @sk: sock * @family: protocol family * @priority: gfp flags * * Allocate and attach a security structure to the sk->sk_security field, which * is used to copy security attributes between local stream sockets. * * Return: Returns 0 on success, error on failure. 
*/ int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { int rc = lsm_sock_alloc(sk, priority); if (unlikely(rc)) return rc; rc = call_int_hook(sk_alloc_security, sk, family, priority); if (unlikely(rc)) security_sk_free(sk); return rc; } /** * security_sk_free() - Free the sock's LSM blob * @sk: sock * * Deallocate security structure. */ void security_sk_free(struct sock *sk) { call_void_hook(sk_free_security, sk); kfree(sk->sk_security); sk->sk_security = NULL; } /** * security_sk_clone() - Clone a sock's LSM state * @sk: original sock * @newsk: target sock * * Clone/copy security structure. */ void security_sk_clone(const struct sock *sk, struct sock *newsk) { call_void_hook(sk_clone_security, sk, newsk); } EXPORT_SYMBOL(security_sk_clone); /** * security_sk_classify_flow() - Set a flow's secid based on socket * @sk: original socket * @flic: target flow * * Set the target flow's secid to socket's secid. */ void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic) { call_void_hook(sk_getsecid, sk, &flic->flowic_secid); } EXPORT_SYMBOL(security_sk_classify_flow); /** * security_req_classify_flow() - Set a flow's secid based on request_sock * @req: request_sock * @flic: target flow * * Sets @flic's secid to @req's secid. */ void security_req_classify_flow(const struct request_sock *req, struct flowi_common *flic) { call_void_hook(req_classify_flow, req, flic); } EXPORT_SYMBOL(security_req_classify_flow); /** * security_sock_graft() - Reconcile LSM state when grafting a sock on a socket * @sk: sock being grafted * @parent: target parent socket * * Sets @parent's inode secid to @sk's secid and update @sk with any necessary * LSM state from @parent. */ void security_sock_graft(struct sock *sk, struct socket *parent) { call_void_hook(sock_graft, sk, parent); } EXPORT_SYMBOL(security_sock_graft); /** * security_inet_conn_request() - Set request_sock state using incoming connect * @sk: parent listening sock * @skb: incoming connection * @req: new request_sock * * Initialize the @req LSM state based on @sk and the incoming connect in @skb. * * Return: Returns 0 if permission is granted. */ int security_inet_conn_request(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { return call_int_hook(inet_conn_request, sk, skb, req); } EXPORT_SYMBOL(security_inet_conn_request); /** * security_inet_csk_clone() - Set new sock LSM state based on request_sock * @newsk: new sock * @req: connection request_sock * * Set that LSM state of @sock using the LSM state from @req. */ void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req) { call_void_hook(inet_csk_clone, newsk, req); } /** * security_inet_conn_established() - Update sock's LSM state with connection * @sk: sock * @skb: connection packet * * Update @sock's LSM state to represent a new connection from @skb. */ void security_inet_conn_established(struct sock *sk, struct sk_buff *skb) { call_void_hook(inet_conn_established, sk, skb); } EXPORT_SYMBOL(security_inet_conn_established); /** * security_secmark_relabel_packet() - Check if setting a secmark is allowed * @secid: new secmark value * * Check if the process should be allowed to relabel packets to @secid. * * Return: Returns 0 if permission is granted. 
*/ int security_secmark_relabel_packet(u32 secid) { return call_int_hook(secmark_relabel_packet, secid); } EXPORT_SYMBOL(security_secmark_relabel_packet); /** * security_secmark_refcount_inc() - Increment the secmark labeling rule count * * Tells the LSM to increment the number of secmark labeling rules loaded. */ void security_secmark_refcount_inc(void) { call_void_hook(secmark_refcount_inc); } EXPORT_SYMBOL(security_secmark_refcount_inc); /** * security_secmark_refcount_dec() - Decrement the secmark labeling rule count * * Tells the LSM to decrement the number of secmark labeling rules loaded. */ void security_secmark_refcount_dec(void) { call_void_hook(secmark_refcount_dec); } EXPORT_SYMBOL(security_secmark_refcount_dec); /** * security_tun_dev_alloc_security() - Allocate a LSM blob for a TUN device * @security: pointer to the LSM blob * * This hook allows a module to allocate a security structure for a TUN device, * returning the pointer in @security. * * Return: Returns a zero on success, negative values on failure. */ int security_tun_dev_alloc_security(void **security) { int rc; rc = lsm_blob_alloc(security, blob_sizes.lbs_tun_dev, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(tun_dev_alloc_security, *security); if (rc) { kfree(*security); *security = NULL; } return rc; } EXPORT_SYMBOL(security_tun_dev_alloc_security); /** * security_tun_dev_free_security() - Free a TUN device LSM blob * @security: LSM blob * * This hook allows a module to free the security structure for a TUN device. */ void security_tun_dev_free_security(void *security) { kfree(security); } EXPORT_SYMBOL(security_tun_dev_free_security); /** * security_tun_dev_create() - Check if creating a TUN device is allowed * * Check permissions prior to creating a new TUN device. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_create(void) { return call_int_hook(tun_dev_create); } EXPORT_SYMBOL(security_tun_dev_create); /** * security_tun_dev_attach_queue() - Check if attaching a TUN queue is allowed * @security: TUN device LSM blob * * Check permissions prior to attaching to a TUN device queue. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_attach_queue(void *security) { return call_int_hook(tun_dev_attach_queue, security); } EXPORT_SYMBOL(security_tun_dev_attach_queue); /** * security_tun_dev_attach() - Update TUN device LSM state on attach * @sk: associated sock * @security: TUN device LSM blob * * This hook can be used by the module to update any security state associated * with the TUN device's sock structure. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_attach(struct sock *sk, void *security) { return call_int_hook(tun_dev_attach, sk, security); } EXPORT_SYMBOL(security_tun_dev_attach); /** * security_tun_dev_open() - Update TUN device LSM state on open * @security: TUN device LSM blob * * This hook can be used by the module to update any security state associated * with the TUN device's security structure. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_open(void *security) { return call_int_hook(tun_dev_open, security); } EXPORT_SYMBOL(security_tun_dev_open); /** * security_sctp_assoc_request() - Update the LSM on a SCTP association req * @asoc: SCTP association * @skb: packet requesting the association * * Passes the @asoc and @chunk->skb of the association INIT packet to the LSM. * * Return: Returns 0 on success, error on failure. 
*/ int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb) { return call_int_hook(sctp_assoc_request, asoc, skb); } EXPORT_SYMBOL(security_sctp_assoc_request); /** * security_sctp_bind_connect() - Validate a list of addrs for a SCTP option * @sk: socket * @optname: SCTP option to validate * @address: list of IP addresses to validate * @addrlen: length of the address list * * Validiate permissions required for each address associated with sock @sk. * Depending on @optname, the addresses will be treated as either a connect or * bind service. The @addrlen is calculated on each IPv4 and IPv6 address using * sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6). * * Return: Returns 0 on success, error on failure. */ int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen) { return call_int_hook(sctp_bind_connect, sk, optname, address, addrlen); } EXPORT_SYMBOL(security_sctp_bind_connect); /** * security_sctp_sk_clone() - Clone a SCTP sock's LSM state * @asoc: SCTP association * @sk: original sock * @newsk: target sock * * Called whenever a new socket is created by accept(2) (i.e. a TCP style * socket) or when a socket is 'peeled off' e.g userspace calls * sctp_peeloff(3). */ void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk) { call_void_hook(sctp_sk_clone, asoc, sk, newsk); } EXPORT_SYMBOL(security_sctp_sk_clone); /** * security_sctp_assoc_established() - Update LSM state when assoc established * @asoc: SCTP association * @skb: packet establishing the association * * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet to the * security module. * * Return: Returns 0 if permission is granted. */ int security_sctp_assoc_established(struct sctp_association *asoc, struct sk_buff *skb) { return call_int_hook(sctp_assoc_established, asoc, skb); } EXPORT_SYMBOL(security_sctp_assoc_established); /** * security_mptcp_add_subflow() - Inherit the LSM label from the MPTCP socket * @sk: the owning MPTCP socket * @ssk: the new subflow * * Update the labeling for the given MPTCP subflow, to match the one of the * owning MPTCP socket. This hook has to be called after the socket creation and * initialization via the security_socket_create() and * security_socket_post_create() LSM hooks. * * Return: Returns 0 on success or a negative error code on failure. */ int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk) { return call_int_hook(mptcp_add_subflow, sk, ssk); } #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND /** * security_ib_pkey_access() - Check if access to an IB pkey is allowed * @sec: LSM blob * @subnet_prefix: subnet prefix of the port * @pkey: IB pkey * * Check permission to access a pkey when modifying a QP. * * Return: Returns 0 if permission is granted. */ int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) { return call_int_hook(ib_pkey_access, sec, subnet_prefix, pkey); } EXPORT_SYMBOL(security_ib_pkey_access); /** * security_ib_endport_manage_subnet() - Check if SMPs traffic is allowed * @sec: LSM blob * @dev_name: IB device name * @port_num: port number * * Check permissions to send and receive SMPs on a end port. * * Return: Returns 0 if permission is granted. 
*/ int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) { return call_int_hook(ib_endport_manage_subnet, sec, dev_name, port_num); } EXPORT_SYMBOL(security_ib_endport_manage_subnet); /** * security_ib_alloc_security() - Allocate an Infiniband LSM blob * @sec: LSM blob * * Allocate a security structure for Infiniband objects. * * Return: Returns 0 on success, non-zero on failure. */ int security_ib_alloc_security(void **sec) { int rc; rc = lsm_blob_alloc(sec, blob_sizes.lbs_ib, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(ib_alloc_security, *sec); if (rc) { kfree(*sec); *sec = NULL; } return rc; } EXPORT_SYMBOL(security_ib_alloc_security); /** * security_ib_free_security() - Free an Infiniband LSM blob * @sec: LSM blob * * Deallocate an Infiniband security structure. */ void security_ib_free_security(void *sec) { kfree(sec); } EXPORT_SYMBOL(security_ib_free_security); #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM /** * security_xfrm_policy_alloc() - Allocate a xfrm policy LSM blob * @ctxp: xfrm security context being added to the SPD * @sec_ctx: security label provided by userspace * @gfp: gfp flags * * Allocate a security structure to the xp->security field; the security field * is initialized to NULL when the xfrm_policy is allocated. * * Return: Return 0 if operation was successful. */ int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) { return call_int_hook(xfrm_policy_alloc_security, ctxp, sec_ctx, gfp); } EXPORT_SYMBOL(security_xfrm_policy_alloc); /** * security_xfrm_policy_clone() - Clone xfrm policy LSM state * @old_ctx: xfrm security context * @new_ctxp: target xfrm security context * * Allocate a security structure in new_ctxp that contains the information from * the old_ctx structure. * * Return: Return 0 if operation was successful. */ int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp) { return call_int_hook(xfrm_policy_clone_security, old_ctx, new_ctxp); } /** * security_xfrm_policy_free() - Free a xfrm security context * @ctx: xfrm security context * * Free LSM resources associated with @ctx. */ void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { call_void_hook(xfrm_policy_free_security, ctx); } EXPORT_SYMBOL(security_xfrm_policy_free); /** * security_xfrm_policy_delete() - Check if deleting a xfrm policy is allowed * @ctx: xfrm security context * * Authorize deletion of a SPD entry. * * Return: Returns 0 if permission is granted. */ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { return call_int_hook(xfrm_policy_delete_security, ctx); } /** * security_xfrm_state_alloc() - Allocate a xfrm state LSM blob * @x: xfrm state being added to the SAD * @sec_ctx: security label provided by userspace * * Allocate a security structure to the @x->security field; the security field * is initialized to NULL when the xfrm_state is allocated. Set the context to * correspond to @sec_ctx. * * Return: Return 0 if operation was successful. 
*/ int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) { return call_int_hook(xfrm_state_alloc, x, sec_ctx); } EXPORT_SYMBOL(security_xfrm_state_alloc); /** * security_xfrm_state_alloc_acquire() - Allocate a xfrm state LSM blob * @x: xfrm state being added to the SAD * @polsec: associated policy's security context * @secid: secid from the flow * * Allocate a security structure to the x->security field; the security field * is initialized to NULL when the xfrm_state is allocated. Set the context to * correspond to secid. * * Return: Returns 0 if operation was successful. */ int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { return call_int_hook(xfrm_state_alloc_acquire, x, polsec, secid); } /** * security_xfrm_state_delete() - Check if deleting a xfrm state is allowed * @x: xfrm state * * Authorize deletion of x->security. * * Return: Returns 0 if permission is granted. */ int security_xfrm_state_delete(struct xfrm_state *x) { return call_int_hook(xfrm_state_delete_security, x); } EXPORT_SYMBOL(security_xfrm_state_delete); /** * security_xfrm_state_free() - Free a xfrm state * @x: xfrm state * * Deallocate x->security. */ void security_xfrm_state_free(struct xfrm_state *x) { call_void_hook(xfrm_state_free_security, x); } /** * security_xfrm_policy_lookup() - Check if using a xfrm policy is allowed * @ctx: target xfrm security context * @fl_secid: flow secid used to authorize access * * Check permission when a flow selects a xfrm_policy for processing XFRMs on a * packet. The hook is called when selecting either a per-socket policy or a * generic xfrm policy. * * Return: Return 0 if permission is granted, -ESRCH otherwise, or -errno on * other errors. */ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid) { return call_int_hook(xfrm_policy_lookup, ctx, fl_secid); } /** * security_xfrm_state_pol_flow_match() - Check for a xfrm match * @x: xfrm state to match * @xp: xfrm policy to check for a match * @flic: flow to check for a match. * * Check @xp and @flic for a match with @x. * * Return: Returns 1 if there is a match. */ int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) { struct lsm_static_call *scall; int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); /* * Since this function is expected to return 0 or 1, the judgment * becomes difficult if multiple LSMs supply this call. Fortunately, * we can use the first LSM's judgment because currently only SELinux * supplies this call. * * For speed optimization, we explicitly break the loop rather than * using the macro */ lsm_for_each_hook(scall, xfrm_state_pol_flow_match) { rc = scall->hl->hook.xfrm_state_pol_flow_match(x, xp, flic); break; } return rc; } /** * security_xfrm_decode_session() - Determine the xfrm secid for a packet * @skb: xfrm packet * @secid: secid * * Decode the packet in @skb and return the security label in @secid. * * Return: Return 0 if all xfrms used have the same secid. 
*/ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) { return call_int_hook(xfrm_decode_session, skb, secid, 1); } void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic) { int rc = call_int_hook(xfrm_decode_session, skb, &flic->flowic_secid, 0); BUG_ON(rc); } EXPORT_SYMBOL(security_skb_classify_flow); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_KEYS /** * security_key_alloc() - Allocate and initialize a kernel key LSM blob * @key: key * @cred: credentials * @flags: allocation flags * * Permit allocation of a key and assign security data. Note that key does not * have a serial number assigned at this point. * * Return: Return 0 if permission is granted, -ve error otherwise. */ int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { int rc = lsm_key_alloc(key); if (unlikely(rc)) return rc; rc = call_int_hook(key_alloc, key, cred, flags); if (unlikely(rc)) security_key_free(key); return rc; } /** * security_key_free() - Free a kernel key LSM blob * @key: key * * Notification of destruction; free security data. */ void security_key_free(struct key *key) { kfree(key->security); key->security = NULL; } /** * security_key_permission() - Check if a kernel key operation is allowed * @key_ref: key reference * @cred: credentials of actor requesting access * @need_perm: requested permissions * * See whether a specific operational right is granted to a process on a key. * * Return: Return 0 if permission is granted, -ve error otherwise. */ int security_key_permission(key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) { return call_int_hook(key_permission, key_ref, cred, need_perm); } /** * security_key_getsecurity() - Get the key's security label * @key: key * @buffer: security label buffer * * Get a textual representation of the security context attached to a key for * the purposes of honouring KEYCTL_GETSECURITY. This function allocates the * storage for the NUL-terminated string and the caller should free it. * * Return: Returns the length of @buffer (including terminating NUL) or -ve if * an error occurs. May also return 0 (and a NULL buffer pointer) if * there is no security label assigned to the key. */ int security_key_getsecurity(struct key *key, char **buffer) { *buffer = NULL; return call_int_hook(key_getsecurity, key, buffer); } /** * security_key_post_create_or_update() - Notification of key create or update * @keyring: keyring to which the key is linked to * @key: created or updated key * @payload: data used to instantiate or update the key * @payload_len: length of payload * @flags: key flags * @create: flag indicating whether the key was created or updated * * Notify the caller of a key creation or update. */ void security_key_post_create_or_update(struct key *keyring, struct key *key, const void *payload, size_t payload_len, unsigned long flags, bool create) { call_void_hook(key_post_create_or_update, keyring, key, payload, payload_len, flags, create); } #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT /** * security_audit_rule_init() - Allocate and init an LSM audit rule struct * @field: audit action * @op: rule operator * @rulestr: rule context * @lsmrule: receive buffer for audit rule struct * @gfp: GFP flag used for kmalloc * * Allocate and initialize an LSM audit rule structure. * * Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of * an invalid rule. 
*/ int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp) { return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp); } /** * security_audit_rule_known() - Check if an audit rule contains LSM fields * @krule: audit rule * * Specifies whether given @krule contains any fields related to the current * LSM. * * Return: Returns 1 in case of relation found, 0 otherwise. */ int security_audit_rule_known(struct audit_krule *krule) { return call_int_hook(audit_rule_known, krule); } /** * security_audit_rule_free() - Free an LSM audit rule struct * @lsmrule: audit rule struct * * Deallocate the LSM audit rule structure previously allocated by * audit_rule_init(). */ void security_audit_rule_free(void *lsmrule) { call_void_hook(audit_rule_free, lsmrule); } /** * security_audit_rule_match() - Check if a label matches an audit rule * @prop: security label * @field: LSM audit field * @op: matching operator * @lsmrule: audit rule * * Determine if given @secid matches a rule previously approved by * security_audit_rule_known(). * * Return: Returns 1 if secid matches the rule, 0 if it does not, -ERRNO on * failure. */ int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, void *lsmrule) { return call_int_hook(audit_rule_match, prop, field, op, lsmrule); } #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL /** * security_bpf() - Check if the bpf syscall operation is allowed * @cmd: command * @attr: bpf attribute * @size: size * @kernel: whether or not call originated from kernel * * Do a initial check for all bpf syscalls after the attribute is copied into * the kernel. The actual security module can implement their own rules to * check the specific cmd they need. * * Return: Returns 0 if permission is granted. */ int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { return call_int_hook(bpf, cmd, attr, size, kernel); } /** * security_bpf_map() - Check if access to a bpf map is allowed * @map: bpf map * @fmode: mode * * Do a check when the kernel generates and returns a file descriptor for eBPF * maps. * * Return: Returns 0 if permission is granted. */ int security_bpf_map(struct bpf_map *map, fmode_t fmode) { return call_int_hook(bpf_map, map, fmode); } /** * security_bpf_prog() - Check if access to a bpf program is allowed * @prog: bpf program * * Do a check when the kernel generates and returns a file descriptor for eBPF * programs. * * Return: Returns 0 if permission is granted. */ int security_bpf_prog(struct bpf_prog *prog) { return call_int_hook(bpf_prog, prog); } /** * security_bpf_map_create() - Check if BPF map creation is allowed * @map: BPF map object * @attr: BPF syscall attributes used to create BPF map * @token: BPF token used to grant user access * @kernel: whether or not call originated from kernel * * Do a check when the kernel creates a new BPF map. This is also the * point where LSM blob is allocated for LSMs that need them. * * Return: Returns 0 on success, error on failure. 
*/ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, struct bpf_token *token, bool kernel) { int rc; rc = lsm_bpf_map_alloc(map); if (unlikely(rc)) return rc; rc = call_int_hook(bpf_map_create, map, attr, token, kernel); if (unlikely(rc)) security_bpf_map_free(map); return rc; } /** * security_bpf_prog_load() - Check if loading of BPF program is allowed * @prog: BPF program object * @attr: BPF syscall attributes used to create BPF program * @token: BPF token used to grant user access to BPF subsystem * @kernel: whether or not call originated from kernel * * Perform an access control check when the kernel loads a BPF program and * allocates associated BPF program object. This hook is also responsible for * allocating any required LSM state for the BPF program. * * Return: Returns 0 on success, error on failure. */ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token, bool kernel) { int rc; rc = lsm_bpf_prog_alloc(prog); if (unlikely(rc)) return rc; rc = call_int_hook(bpf_prog_load, prog, attr, token, kernel); if (unlikely(rc)) security_bpf_prog_free(prog); return rc; } /** * security_bpf_token_create() - Check if creating of BPF token is allowed * @token: BPF token object * @attr: BPF syscall attributes used to create BPF token * @path: path pointing to BPF FS mount point from which BPF token is created * * Do a check when the kernel instantiates a new BPF token object from BPF FS * instance. This is also the point where LSM blob can be allocated for LSMs. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, const struct path *path) { int rc; rc = lsm_bpf_token_alloc(token); if (unlikely(rc)) return rc; rc = call_int_hook(bpf_token_create, token, attr, path); if (unlikely(rc)) security_bpf_token_free(token); return rc; } /** * security_bpf_token_cmd() - Check if BPF token is allowed to delegate * requested BPF syscall command * @token: BPF token object * @cmd: BPF syscall command requested to be delegated by BPF token * * Do a check when the kernel decides whether provided BPF token should allow * delegation of requested BPF syscall command. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd) { return call_int_hook(bpf_token_cmd, token, cmd); } /** * security_bpf_token_capable() - Check if BPF token is allowed to delegate * requested BPF-related capability * @token: BPF token object * @cap: capabilities requested to be delegated by BPF token * * Do a check when the kernel decides whether provided BPF token should allow * delegation of requested BPF-related capabilities. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_capable(const struct bpf_token *token, int cap) { return call_int_hook(bpf_token_capable, token, cap); } /** * security_bpf_map_free() - Free a bpf map's LSM blob * @map: bpf map * * Clean up the security information stored inside bpf map. */ void security_bpf_map_free(struct bpf_map *map) { call_void_hook(bpf_map_free, map); kfree(map->security); map->security = NULL; } /** * security_bpf_prog_free() - Free a BPF program's LSM blob * @prog: BPF program struct * * Clean up the security information stored inside BPF program. 
*/ void security_bpf_prog_free(struct bpf_prog *prog) { call_void_hook(bpf_prog_free, prog); kfree(prog->aux->security); prog->aux->security = NULL; } /** * security_bpf_token_free() - Free a BPF token's LSM blob * @token: BPF token struct * * Clean up the security information stored inside BPF token. */ void security_bpf_token_free(struct bpf_token *token) { call_void_hook(bpf_token_free, token); kfree(token->security); token->security = NULL; } #endif /* CONFIG_BPF_SYSCALL */ /** * security_locked_down() - Check if a kernel feature is allowed * @what: requested kernel feature * * Determine whether a kernel feature that potentially enables arbitrary code * execution in kernel space should be permitted. * * Return: Returns 0 if permission is granted. */ int security_locked_down(enum lockdown_reason what) { return call_int_hook(locked_down, what); } EXPORT_SYMBOL(security_locked_down); /** * security_bdev_alloc() - Allocate a block device LSM blob * @bdev: block device * * Allocate and attach a security structure to @bdev->bd_security. The * security field is initialized to NULL when the bdev structure is * allocated. * * Return: Return 0 if operation was successful. */ int security_bdev_alloc(struct block_device *bdev) { int rc = 0; rc = lsm_bdev_alloc(bdev); if (unlikely(rc)) return rc; rc = call_int_hook(bdev_alloc_security, bdev); if (unlikely(rc)) security_bdev_free(bdev); return rc; } EXPORT_SYMBOL(security_bdev_alloc); /** * security_bdev_free() - Free a block device's LSM blob * @bdev: block device * * Deallocate the bdev security structure and set @bdev->bd_security to NULL. */ void security_bdev_free(struct block_device *bdev) { if (!bdev->bd_security) return; call_void_hook(bdev_free_security, bdev); kfree(bdev->bd_security); bdev->bd_security = NULL; } EXPORT_SYMBOL(security_bdev_free); /** * security_bdev_setintegrity() - Set the device's integrity data * @bdev: block device * @type: type of integrity, e.g. hash digest, signature, etc * @value: the integrity value * @size: size of the integrity value * * Register a verified integrity measurement of a bdev with LSMs. * LSMs should free the previously saved data if @value is NULL. * Please note that the new hook should be invoked every time the security * information is updated to keep these data current. For example, in dm-verity, * if the mapping table is reloaded and configured to use a different dm-verity * target with a new roothash and signing information, the previously stored * data in the LSM blob will become obsolete. It is crucial to re-invoke the * hook to refresh these data and ensure they are up to date. This necessity * arises from the design of device-mapper, where a device-mapper device is * first created, and then targets are subsequently loaded into it. These * targets can be modified multiple times during the device's lifetime. * Therefore, while the LSM blob is allocated during the creation of the block * device, its actual contents are not initialized at this stage and can change * substantially over time. This includes alterations from data that the LSMs * 'trusts' to those they do not, making it essential to handle these changes * correctly. Failure to address this dynamic aspect could potentially allow * for bypassing LSM checks. * * Return: Returns 0 on success, negative values on failure. 
*/ int security_bdev_setintegrity(struct block_device *bdev, enum lsm_integrity_type type, const void *value, size_t size) { return call_int_hook(bdev_setintegrity, bdev, type, value, size); } EXPORT_SYMBOL(security_bdev_setintegrity); #ifdef CONFIG_PERF_EVENTS /** * security_perf_event_open() - Check if a perf event open is allowed * @type: type of event * * Check whether the @type of perf_event_open syscall is allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_open(int type) { return call_int_hook(perf_event_open, type); } /** * security_perf_event_alloc() - Allocate a perf event LSM blob * @event: perf event * * Allocate and save perf_event security info. * * Return: Returns 0 on success, error on failure. */ int security_perf_event_alloc(struct perf_event *event) { int rc; rc = lsm_blob_alloc(&event->security, blob_sizes.lbs_perf_event, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(perf_event_alloc, event); if (rc) { kfree(event->security); event->security = NULL; } return rc; } /** * security_perf_event_free() - Free a perf event LSM blob * @event: perf event * * Release (free) perf_event security info. */ void security_perf_event_free(struct perf_event *event) { kfree(event->security); event->security = NULL; } /** * security_perf_event_read() - Check if reading a perf event label is allowed * @event: perf event * * Read perf_event security info if allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_read(struct perf_event *event) { return call_int_hook(perf_event_read, event); } /** * security_perf_event_write() - Check if writing a perf event label is allowed * @event: perf event * * Write perf_event security info if allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_write(struct perf_event *event) { return call_int_hook(perf_event_write, event); } #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_IO_URING /** * security_uring_override_creds() - Check if overriding creds is allowed * @new: new credentials * * Check if the current task, executing an io_uring operation, is allowed to * override it's credentials with @new. * * Return: Returns 0 if permission is granted. */ int security_uring_override_creds(const struct cred *new) { return call_int_hook(uring_override_creds, new); } /** * security_uring_sqpoll() - Check if IORING_SETUP_SQPOLL is allowed * * Check whether the current task is allowed to spawn a io_uring polling thread * (IORING_SETUP_SQPOLL). * * Return: Returns 0 if permission is granted. */ int security_uring_sqpoll(void) { return call_int_hook(uring_sqpoll); } /** * security_uring_cmd() - Check if a io_uring passthrough command is allowed * @ioucmd: command * * Check whether the file_operations uring_cmd is allowed to run. * * Return: Returns 0 if permission is granted. */ int security_uring_cmd(struct io_uring_cmd *ioucmd) { return call_int_hook(uring_cmd, ioucmd); } /** * security_uring_allowed() - Check if io_uring_setup() is allowed * * Check whether the current task is allowed to call io_uring_setup(). * * Return: Returns 0 if permission is granted. */ int security_uring_allowed(void) { return call_int_hook(uring_allowed); } #endif /* CONFIG_IO_URING */ /** * security_initramfs_populated() - Notify LSMs that initramfs has been loaded * * Tells the LSMs the initramfs has been unpacked into the rootfs. */ void security_initramfs_populated(void) { call_void_hook(initramfs_populated); } |
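All of the security_*() wrappers above dispatch into hook lists that individual LSMs fill in at registration time. As a rough, hedged illustration of that other side (this sketch is not part of the file above; "mylsm" and mylsm_socket_create are invented names, and it assumes the current LSM_HOOK_INIT()/security_add_hooks()/DEFINE_LSM() registration interface for a built-in LSM):

#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

/* Purely illustrative hook: allow everything, just trace socket creation. */
static int mylsm_socket_create(int family, int type, int protocol, int kern)
{
        pr_debug("mylsm: socket_create family=%d type=%d kern=%d\n",
                 family, type, kern);
        return 0;               /* 0 == permission granted */
}

static struct security_hook_list mylsm_hooks[] __ro_after_init = {
        LSM_HOOK_INIT(socket_create, mylsm_socket_create),
};

/* Real LSMs use an ID reserved in uapi/linux/lsm.h, not LSM_ID_UNDEF. */
static const struct lsm_id mylsm_lsmid = {
        .name   = "mylsm",
        .id     = LSM_ID_UNDEF,
};

static int __init mylsm_init(void)
{
        security_add_hooks(mylsm_hooks, ARRAY_SIZE(mylsm_hooks), &mylsm_lsmid);
        return 0;
}

DEFINE_LSM(mylsm) = {
        .name = "mylsm",
        .init = mylsm_init,
};

Once registered this way, security_socket_create() above would reach mylsm_socket_create() through call_int_hook() on every socket creation.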
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux LED driver for RTL8187
 *
 * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
 *
 * Based on the LED handling in the r8187 driver, which is:
 * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
 *
 * Thanks to Realtek for their support!
 */

#ifdef CONFIG_RTL8187_LEDS

#include <net/mac80211.h>
#include <linux/usb.h>
#include <linux/eeprom_93cx6.h>

#include "rtl8187.h"
#include "leds.h"

static void led_turn_on(struct work_struct *work)
{
        /* As this routine does read/write operations on the hardware, it must
         * be run from a work queue.
         */
        u8 reg;
        struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
                                                 led_on.work);
        struct rtl8187_led *led = &priv->led_tx;

        /* Don't change the LED, when the device is down. */
        if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
                return;

        /* Skip if the LED is not registered. */
        if (!led->dev)
                return;

        mutex_lock(&priv->conf_mutex);
        switch (led->ledpin) {
        case LED_PIN_GPIO0:
                rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
                rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00);
                break;
        case LED_PIN_LED0:
                reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4);
                rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
                break;
        case LED_PIN_LED1:
                reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5);
                rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
                break;
        case LED_PIN_HW:
        default:
                break;
        }
        mutex_unlock(&priv->conf_mutex);
}

static void led_turn_off(struct work_struct *work)
{
        /* As this routine does read/write operations on the hardware, it must
         * be run from a work queue.
         */
        u8 reg;
        struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
                                                 led_off.work);
        struct rtl8187_led *led = &priv->led_tx;

        /* Don't change the LED, when the device is down. */
        if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
                return;

        /* Skip if the LED is not registered. */
        if (!led->dev)
                return;

        mutex_lock(&priv->conf_mutex);
        switch (led->ledpin) {
        case LED_PIN_GPIO0:
                rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01);
                rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01);
                break;
        case LED_PIN_LED0:
                reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4);
                rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
                break;
        case LED_PIN_LED1:
                reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5);
                rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
                break;
        case LED_PIN_HW:
        default:
                break;
        }
        mutex_unlock(&priv->conf_mutex);
}

/* Callback from the LED subsystem.
*/ static void rtl8187_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { struct rtl8187_led *led = container_of(led_dev, struct rtl8187_led, led_dev); struct ieee80211_hw *hw = led->dev; struct rtl8187_priv *priv; static bool radio_on; if (!hw) return; priv = hw->priv; if (led->is_radio) { if (brightness == LED_FULL) { ieee80211_queue_delayed_work(hw, &priv->led_on, 0); radio_on = true; } else if (radio_on) { radio_on = false; cancel_delayed_work(&priv->led_on); ieee80211_queue_delayed_work(hw, &priv->led_off, 0); } } else if (radio_on) { if (brightness == LED_OFF) { ieee80211_queue_delayed_work(hw, &priv->led_off, 0); /* The LED is off for 1/20 sec - it just blinks. */ ieee80211_queue_delayed_work(hw, &priv->led_on, HZ / 20); } else ieee80211_queue_delayed_work(hw, &priv->led_on, 0); } } static int rtl8187_register_led(struct ieee80211_hw *dev, struct rtl8187_led *led, const char *name, const char *default_trigger, u8 ledpin, bool is_radio) { int err; struct rtl8187_priv *priv = dev->priv; if (led->dev) return -EEXIST; if (!default_trigger) return -EINVAL; led->dev = dev; led->ledpin = ledpin; led->is_radio = is_radio; strscpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; led->led_dev.brightness_set = rtl8187_led_brightness_set; err = led_classdev_register(&priv->udev->dev, &led->led_dev); if (err) { printk(KERN_INFO "LEDs: Failed to register %s\n", name); led->dev = NULL; return err; } return 0; } static void rtl8187_unregister_led(struct rtl8187_led *led) { struct ieee80211_hw *hw = led->dev; struct rtl8187_priv *priv = hw->priv; led_classdev_unregister(&led->led_dev); flush_delayed_work(&priv->led_off); led->dev = NULL; } void rtl8187_leds_init(struct ieee80211_hw *dev, u16 custid) { struct rtl8187_priv *priv = dev->priv; char name[RTL8187_LED_MAX_NAME_LEN + 1]; u8 ledpin; int err; /* According to the vendor driver, the LED operation depends on the * customer ID encoded in the EEPROM */ printk(KERN_INFO "rtl8187: Customer ID is 0x%02X\n", custid); switch (custid) { case EEPROM_CID_RSVD0: case EEPROM_CID_RSVD1: case EEPROM_CID_SERCOMM_PS: case EEPROM_CID_QMI: case EEPROM_CID_DELL: case EEPROM_CID_TOSHIBA: ledpin = LED_PIN_GPIO0; break; case EEPROM_CID_ALPHA0: ledpin = LED_PIN_LED0; break; case EEPROM_CID_HW: ledpin = LED_PIN_HW; break; default: ledpin = LED_PIN_GPIO0; } INIT_DELAYED_WORK(&priv->led_on, led_turn_on); INIT_DELAYED_WORK(&priv->led_off, led_turn_off); snprintf(name, sizeof(name), "rtl8187-%s::radio", wiphy_name(dev->wiphy)); err = rtl8187_register_led(dev, &priv->led_radio, name, ieee80211_get_radio_led_name(dev), ledpin, true); if (err) return; snprintf(name, sizeof(name), "rtl8187-%s::tx", wiphy_name(dev->wiphy)); err = rtl8187_register_led(dev, &priv->led_tx, name, ieee80211_get_tx_led_name(dev), ledpin, false); if (err) goto err_tx; snprintf(name, sizeof(name), "rtl8187-%s::rx", wiphy_name(dev->wiphy)); err = rtl8187_register_led(dev, &priv->led_rx, name, ieee80211_get_rx_led_name(dev), ledpin, false); if (!err) return; /* registration of RX LED failed - unregister */ rtl8187_unregister_led(&priv->led_tx); err_tx: rtl8187_unregister_led(&priv->led_radio); } void rtl8187_leds_exit(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; rtl8187_unregister_led(&priv->led_radio); rtl8187_unregister_led(&priv->led_rx); rtl8187_unregister_led(&priv->led_tx); cancel_delayed_work_sync(&priv->led_off); cancel_delayed_work_sync(&priv->led_on); } #endif /* def 
CONFIG_RTL8187_LEDS */
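The rtl8187 LED code above is built on the generic LED class API: each LED is a struct led_classdev whose brightness_set() callback must not sleep, which is why the driver only schedules delayed work there and does the USB register writes from the work handler. A minimal, self-contained sketch of that registration pattern follows; it is not part of the driver, and the "demo" names are invented:

#include <linux/leds.h>
#include <linux/module.h>

static void demo_brightness_set(struct led_classdev *led_cdev,
                                enum led_brightness brightness)
{
        /*
         * Must not sleep here.  A real driver would either write a
         * memory-mapped register directly or, as rtl8187 does, queue
         * work that performs the (possibly sleeping) bus access later.
         */
        pr_debug("demo LED set to %d\n", brightness);
}

static struct led_classdev demo_led = {
        .name           = "demo::status",
        .max_brightness = 1,
        .brightness_set = demo_brightness_set,
};

static int __init demo_led_init(void)
{
        /* A real driver passes its parent struct device instead of NULL. */
        return led_classdev_register(NULL, &demo_led);
}
module_init(demo_led_init);

static void __exit demo_led_exit(void)
{
        led_classdev_unregister(&demo_led);
}
module_exit(demo_led_exit);

MODULE_DESCRIPTION("Illustrative led_classdev registration sketch");
MODULE_LICENSE("GPL");

A driver whose LED lives behind a slow bus (USB, I2C) would, like rtl8187 above, only schedule work from the callback and perform the actual I/O elsewhere.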
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HID raw devices, giving access to raw HID events.
 *
 * In comparison to hiddev, this device does not process the
 * hid events at all (no parsing, no lookups). This lets applications
 * work on raw hid events as they want to, and avoids the need to
 * use transport-specific userspace libhid/libusb libraries.
* * Copyright (c) 2007-2014 Jiri Kosina */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/hidraw.h> static int hidraw_major; static struct cdev hidraw_cdev; static const struct class hidraw_class = { .name = "hidraw", }; static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DECLARE_RWSEM(minors_rwsem); static inline bool hidraw_is_revoked(struct hidraw_list *list) { return list->revoked; } static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; int ret = 0, len; DECLARE_WAITQUEUE(wait, current); if (hidraw_is_revoked(list)) return -ENODEV; mutex_lock(&list->read_mutex); while (ret == 0) { if (list->head == list->tail) { add_wait_queue(&list->hidraw->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list->head == list->tail) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!list->hidraw->exist) { ret = -EIO; break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } /* allow O_NONBLOCK to work well from other threads */ mutex_unlock(&list->read_mutex); schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&list->hidraw->wait, &wait); } if (ret) goto out; len = list->buffer[list->tail].len > count ? count : list->buffer[list->tail].len; if (list->buffer[list->tail].value) { if (copy_to_user(buffer, list->buffer[list->tail].value, len)) { ret = -EFAULT; goto out; } ret = len; } kfree(list->buffer[list->tail].value); list->buffer[list->tail].value = NULL; list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } out: mutex_unlock(&list->read_mutex); return ret; } /* * The first byte of the report buffer is expected to be a report number. */ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); struct hid_device *dev; __u8 *buf; int ret = 0; lockdep_assert_held(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = memdup_user(buffer, count); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out; } if ((report_type == HID_OUTPUT_REPORT) && !(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) { ret = __hid_hw_output_report(dev, buf, count, (u64)(long)file, false); /* * compatibility with old implementation of USB-HID and I2C-HID: * if the device does not support receiving output reports, * on an interrupt endpoint, fallback to SET_REPORT HID command. 
*/ if (ret != -ENOSYS) goto out_free; } ret = __hid_hw_raw_request(dev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT, (u64)(long)file, false); out_free: kfree(buf); out: return ret; } static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; ssize_t ret; down_read(&minors_rwsem); if (hidraw_is_revoked(list)) ret = -ENODEV; else ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); up_read(&minors_rwsem); return ret; } /* * This function performs a Get_Report transfer over the control endpoint * per section 7.2.1 of the HID specification, version 1.1. The first byte * of buffer is the report number to request, or 0x0 if the device does not * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT * or HID_INPUT_REPORT. */ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); struct hid_device *dev; __u8 *buf; int ret = 0, len; unsigned char report_number; lockdep_assert_held(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (!dev->ll_driver->raw_request) { ret = -ENODEV; goto out; } if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = kmalloc(count, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } /* * Read the first byte from the user. This is the report number, * which is passed to hid_hw_raw_request(). */ if (copy_from_user(&report_number, buffer, 1)) { ret = -EFAULT; goto out_free; } ret = __hid_hw_raw_request(dev, report_number, buf, count, report_type, HID_REQ_GET_REPORT, (u64)(long)file, false); if (ret < 0) goto out_free; len = (ret < count) ? ret : count; if (copy_to_user(buffer, buf, len)) { ret = -EFAULT; goto out_free; } ret = len; out_free: kfree(buf); out: return ret; } static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist || hidraw_is_revoked(list)) mask |= EPOLLERR | EPOLLHUP; return mask; } static int hidraw_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list; unsigned long flags; int err = 0; if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } /* * Technically not writing to the hidraw_table but a write lock is * required to protect the device refcount. This is symmetrical to * hidraw_release(). 
*/ down_write(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { err = -ENODEV; goto out_unlock; } dev = hidraw_table[minor]; if (!dev->open++) { err = hid_hw_power(dev->hid, PM_HINT_FULLON); if (err < 0) { dev->open--; goto out_unlock; } err = hid_hw_open(dev->hid); if (err < 0) { hid_hw_power(dev->hid, PM_HINT_NORMAL); dev->open--; goto out_unlock; } } list->hidraw = hidraw_table[minor]; mutex_init(&list->read_mutex); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); list_add_tail(&list->node, &hidraw_table[minor]->list); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); file->private_data = list; out_unlock: up_write(&minors_rwsem); out: if (err < 0) kfree(list); return err; } static int hidraw_fasync(int fd, struct file *file, int on) { struct hidraw_list *list = file->private_data; if (hidraw_is_revoked(list)) return -ENODEV; return fasync_helper(fd, file, on, &list->fasync); } static void drop_ref(struct hidraw *hidraw, int exists_bit) { if (exists_bit) { hidraw->exist = 0; if (hidraw->open) { hid_hw_close(hidraw->hid); wake_up_interruptible(&hidraw->wait); } device_destroy(&hidraw_class, MKDEV(hidraw_major, hidraw->minor)); } else { --hidraw->open; } if (!hidraw->open) { if (!hidraw->exist) { hidraw_table[hidraw->minor] = NULL; kfree(hidraw); } else { /* close device for last reader */ hid_hw_close(hidraw->hid); hid_hw_power(hidraw->hid, PM_HINT_NORMAL); } } } static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct hidraw_list *list = file->private_data; unsigned long flags; down_write(&minors_rwsem); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); while (list->tail != list->head) { kfree(list->buffer[list->tail].value); list->buffer[list->tail].value = NULL; list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } list_del(&list->node); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); kfree(list); drop_ref(hidraw_table[minor], 0); up_write(&minors_rwsem); return 0; } static int hidraw_revoke(struct hidraw_list *list) { list->revoked = true; return 0; } static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, void __user *arg) { struct hid_device *hid = dev->hid; switch (cmd) { case HIDIOCGRDESCSIZE: if (put_user(hid->rsize, (int __user *)arg)) return -EFAULT; break; case HIDIOCGRDESC: { __u32 len; if (get_user(len, (int __user *)arg)) return -EFAULT; if (len > HID_MAX_DESCRIPTOR_SIZE - 1) return -EINVAL; if (copy_to_user(arg + offsetof( struct hidraw_report_descriptor, value[0]), hid->rdesc, min(hid->rsize, len))) return -EFAULT; break; } case HIDIOCGRAWINFO: { struct hidraw_devinfo dinfo; dinfo.bustype = hid->bus; dinfo.vendor = hid->vendor; dinfo.product = hid->product; if (copy_to_user(arg, &dinfo, sizeof(dinfo))) return -EFAULT; break; } case HIDIOCREVOKE: { struct hidraw_list *list = file->private_data; if (arg) return -EINVAL; return hidraw_revoke(list); } default: /* * None of the above ioctls can return -EAGAIN, so * use it as a marker that we need to check variable * length ioctls. 
*/ return -EAGAIN; } return 0; } static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, void __user *user_arg) { int len = _IOC_SIZE(cmd); switch (cmd & ~IOCSIZE_MASK) { case HIDIOCSFEATURE(0): return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); case HIDIOCGFEATURE(0): return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); case HIDIOCSINPUT(0): return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); case HIDIOCGINPUT(0): return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); case HIDIOCSOUTPUT(0): return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); case HIDIOCGOUTPUT(0): return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); } return -EINVAL; } static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, void __user *user_arg) { struct hid_device *hid = dev->hid; int len = _IOC_SIZE(cmd); int field_len; switch (cmd & ~IOCSIZE_MASK) { case HIDIOCGRAWNAME(0): field_len = strlen(hid->name) + 1; if (len > field_len) len = field_len; return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len; case HIDIOCGRAWPHYS(0): field_len = strlen(hid->phys) + 1; if (len > field_len) len = field_len; return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len; case HIDIOCGRAWUNIQ(0): field_len = strlen(hid->uniq) + 1; if (len > field_len) len = field_len; return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len; } return -EINVAL; } static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list = file->private_data; void __user *user_arg = (void __user *)arg; int ret; down_read(&minors_rwsem); dev = hidraw_table[minor]; if (!dev || !dev->exist || hidraw_is_revoked(list)) { ret = -ENODEV; goto out; } if (_IOC_TYPE(cmd) != 'H') { ret = -EINVAL; goto out; } if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) { ret = -ENOTTY; goto out; } ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg); if (ret != -EAGAIN) goto out; switch (_IOC_DIR(cmd)) { case (_IOC_READ | _IOC_WRITE): ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg); break; case _IOC_READ: ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg); break; default: /* Any other IOC_DIR is wrong */ ret = -EINVAL; } out: up_read(&minors_rwsem); return ret; } static const struct file_operations hidraw_ops = { .owner = THIS_MODULE, .read = hidraw_read, .write = hidraw_write, .poll = hidraw_poll, .open = hidraw_open, .release = hidraw_release, .unlocked_ioctl = hidraw_ioctl, .fasync = hidraw_fasync, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { struct hidraw *dev = hid->hidraw; struct hidraw_list *list; int ret = 0; unsigned long flags; spin_lock_irqsave(&dev->list_lock, flags); list_for_each_entry(list, &dev->list, node) { int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1); if (hidraw_is_revoked(list) || new_head == list->tail) continue; if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) { ret = -ENOMEM; break; } list->buffer[list->head].len = len; list->head = new_head; kill_fasync(&list->fasync, SIGIO, POLL_IN); } spin_unlock_irqrestore(&dev->list_lock, flags); wake_up_interruptible(&dev->wait); return ret; } EXPORT_SYMBOL_GPL(hidraw_report_event); int hidraw_connect(struct hid_device *hid) { int minor, result; struct hidraw 
*dev; /* we accept any HID device, all applications */ dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); if (!dev) return -ENOMEM; result = -EINVAL; down_write(&minors_rwsem); for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) { if (hidraw_table[minor]) continue; hidraw_table[minor] = dev; result = 0; break; } if (result) { up_write(&minors_rwsem); kfree(dev); goto out; } dev->dev = device_create(&hidraw_class, &hid->dev, MKDEV(hidraw_major, minor), NULL, "%s%d", "hidraw", minor); if (IS_ERR(dev->dev)) { hidraw_table[minor] = NULL; up_write(&minors_rwsem); result = PTR_ERR(dev->dev); kfree(dev); goto out; } init_waitqueue_head(&dev->wait); spin_lock_init(&dev->list_lock); INIT_LIST_HEAD(&dev->list); dev->hid = hid; dev->minor = minor; dev->exist = 1; hid->hidraw = dev; up_write(&minors_rwsem); out: return result; } EXPORT_SYMBOL_GPL(hidraw_connect); void hidraw_disconnect(struct hid_device *hid) { struct hidraw *hidraw = hid->hidraw; down_write(&minors_rwsem); drop_ref(hidraw, 1); up_write(&minors_rwsem); } EXPORT_SYMBOL_GPL(hidraw_disconnect); int __init hidraw_init(void) { int result; dev_t dev_id; result = alloc_chrdev_region(&dev_id, HIDRAW_FIRST_MINOR, HIDRAW_MAX_DEVICES, "hidraw"); if (result < 0) { pr_warn("can't get major number\n"); goto out; } hidraw_major = MAJOR(dev_id); result = class_register(&hidraw_class); if (result) goto error_cdev; cdev_init(&hidraw_cdev, &hidraw_ops); result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES); if (result < 0) goto error_class; pr_info("raw HID events driver (C) Jiri Kosina\n"); out: return result; error_class: class_unregister(&hidraw_class); error_cdev: unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); goto out; } void hidraw_exit(void) { dev_t dev_id = MKDEV(hidraw_major, 0); cdev_del(&hidraw_cdev); class_unregister(&hidraw_class); unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); } |
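/*
 * Editor's note: a minimal user-space sketch (not part of the driver) of the
 * hidraw API implemented above.  The fixed-size ioctls go through
 * hidraw_fixed_size_ioctl(), HIDIOCGRAWNAME() through
 * hidraw_ro_variable_size_ioctl(), and read() returns reports queued by
 * hidraw_report_event().  "/dev/hidraw0" and hidraw_demo() are assumptions,
 * pick whichever node udev created for your device.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int hidraw_demo(void)
{
	struct hidraw_devinfo info;
	char name[256] = "";
	unsigned char report[64];
	int fd, desc_size, n;

	fd = open("/dev/hidraw0", O_RDWR);
	if (fd < 0)
		return -1;

	/* Fixed-size ioctls */
	ioctl(fd, HIDIOCGRDESCSIZE, &desc_size);
	ioctl(fd, HIDIOCGRAWINFO, &info);
	/* Variable-size (read-only) ioctl */
	ioctl(fd, HIDIOCGRAWNAME(sizeof(name)), name);
	printf("%s: bus 0x%x vendor 0x%04hx product 0x%04hx, rdesc %d bytes\n",
	       name, info.bustype, (unsigned short)info.vendor,
	       (unsigned short)info.product, desc_size);

	/* Blocking read of the next input report */
	n = read(fd, report, sizeof(report));
	if (n > 0)
		printf("read %d byte report\n", n);

	close(fd);
	return 0;
}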
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Pixart PAC7302 driver
 *
 * Copyright (C) 2008-2012 Jean-Francois Moine <http://moinejf.free.fr>
 * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li
 *
 * Separated from Pixart PAC7311 library by Márton Németh
 * Camera button input handling by Márton Németh <nm127@freemail.hu>
 * Copyright (C) 2009-2010 Márton Németh <nm127@freemail.hu>
 */

/*
 * Some documentation about various registers as determined by trial and error.
 *
 * Register page 0:
 *
 * Address	Description
 * 0x01		Red balance control
 * 0x02		Green balance control
 * 0x03		Blue balance control
 *		The Windows driver uses a quadratic approach to map
 *		the settable values (0-200) on register values:
 *		min=0x20, default=0x40, max=0x80
 * 0x0f-0x20	Color and saturation control
 * 0xa2-0xab	Brightness, contrast and gamma control
 * 0xb6		Sharpness control (bits 0-4)
 *
 * Register page 1:
 *
 * Address	Description
 * 0x78		Global control, bit 6 controls the LED (inverted)
 * 0x80		Compression balance, 2 interesting settings:
 *		0x0f	Default
 *		0x50	Values >= this switch the camera to a lower compression,
 *			using the same table for both luminance and chrominance.
 *			This gives a sharper picture. Only usable when running
 *			at < 15 fps! Note currently the driver does not use this
 *			as the quality gain is small and the generated JPG-s are
 *			only understood by v4l-utils >= 0.8.9
 *
 * Register page 3:
 *
 * Address	Description
 * 0x02		Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on
 *		the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
 * 0x03		Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps
 * 0x04		Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps,
 *		63 -> ~27 fps, the 2 msb's must always be 1 !!
* 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0: * 1 -> ~30 fps, 2 -> ~20 fps * 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time * 0x0f Exposure bit 8, 0-448, 448 = no exposure at all * 0x10 Gain 0-31 * 0x12 Another gain 0-31, unlike 0x10 this one seems to start with an * amplification value of 1 rather then 0 at its lowest setting * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused * 0x80 Another framerate control, best left at 1, moving it from 1 to * 2 causes the framerate to become 3/4th of what it was, and * also seems to cause pixel averaging, resulting in an effective * resolution of 320x240 and thus a much blockier image * * The registers are accessed in the following functions: * * Page | Register | Function * -----+------------+--------------------------------------------------- * 0 | 0x01 | setredbalance() * 0 | 0x03 | setbluebalance() * 0 | 0x0f..0x20 | setcolors() * 0 | 0xa2..0xab | setbrightcont() * 0 | 0xb6 | setsharpness() * 0 | 0xc6 | setwhitebalance() * 0 | 0xdc | setbrightcont(), setcolors() * 3 | 0x02 | setexposure() * 3 | 0x10, 0x12 | setgain() * 3 | 0x11 | setcolors(), setgain(), setexposure(), sethvflip() * 3 | 0x21 | sethvflip() */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/input.h> #include "gspca.h" /* Include pac common sof detection functions */ #include "pac_common.h" #define PAC7302_RGB_BALANCE_MIN 0 #define PAC7302_RGB_BALANCE_MAX 200 #define PAC7302_RGB_BALANCE_DEFAULT 100 #define PAC7302_GAIN_DEFAULT 15 #define PAC7302_GAIN_KNEE 42 #define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */ #define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Thomas Kaiser thomas@kaiser-linux.li"); MODULE_DESCRIPTION("Pixart PAC7302"); MODULE_LICENSE("GPL"); struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct { /* brightness / contrast cluster */ struct v4l2_ctrl *brightness; struct v4l2_ctrl *contrast; }; struct v4l2_ctrl *saturation; struct v4l2_ctrl *white_balance; struct v4l2_ctrl *red_balance; struct v4l2_ctrl *blue_balance; struct { /* flip cluster */ struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; }; struct v4l2_ctrl *sharpness; u8 flags; #define FL_HFLIP 0x01 /* mirrored by default */ #define FL_VFLIP 0x02 /* vertical flipped by default */ u8 sof_read; s8 autogain_ignore_frames; atomic_t avg_lum; }; static const struct v4l2_pix_format vga_mode[] = { {640, 480, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, }, }; #define LOAD_PAGE3 255 #define END_OF_SEQUENCE 0 static const u8 init_7302[] = { /* index,value */ 0xff, 0x01, /* page 1 */ 0x78, 0x00, /* deactivate */ 0xff, 0x01, 0x78, 0x40, /* led off */ }; static const u8 start_7302[] = { /* index, len, [value]* */ 0xff, 1, 0x00, /* page 0 */ 0x00, 12, 0x01, 0x40, 0x40, 0x40, 0x01, 0xe0, 0x02, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0d, 24, 0x03, 0x01, 0x00, 0xb5, 0x07, 0xcb, 0x00, 0x00, 0x07, 0xc8, 0x00, 0xea, 0x07, 0xcf, 0x07, 0xf7, 0x07, 0x7e, 0x01, 0x0b, 0x00, 0x00, 0x00, 0x11, 0x26, 2, 0xaa, 0xaa, 0x2e, 1, 0x31, 0x38, 1, 0x01, 0x3a, 3, 0x14, 0xff, 0x5a, 0x43, 11, 0x00, 0x0a, 0x18, 0x11, 0x01, 0x2c, 0x88, 0x11, 0x00, 0x54, 0x11, 0x55, 1, 0x00, 0x62, 4, 0x10, 0x1e, 0x1e, 0x18, 0x6b, 1, 0x00, 0x6e, 3, 0x08, 0x06, 0x00, 0x72, 3, 0x00, 0xff, 0x00, 0x7d, 23, 0x01, 0x01, 0x58, 0x46, 0x50, 0x3c, 0x50, 0x3c, 0x54, 0x46, 0x54, 0x56, 0x52, 0x50, 0x52, 0x50, 0x56, 0x64, 0xa4, 0x00, 0xda, 0x00, 0x00, 0xa2, 10, 0x22, 0x2c, 0x3c, 0x54, 0x69, 0x7c, 0x9c, 0xb9, 0xd2, 0xeb, 0xaf, 1, 0x02, 0xb5, 2, 0x08, 0x08, 0xb8, 2, 0x08, 0x88, 0xc4, 4, 0xae, 0x01, 0x04, 0x01, 0xcc, 1, 0x00, 0xd1, 11, 0x01, 0x30, 0x49, 0x5e, 0x6f, 0x7f, 0x8e, 0xa9, 0xc1, 0xd7, 0xec, 0xdc, 1, 0x01, 0xff, 1, 0x01, /* page 1 */ 0x12, 3, 0x02, 0x00, 0x01, 0x3e, 2, 0x00, 0x00, 0x76, 5, 0x01, 0x20, 0x40, 0x00, 0xf2, 0x7c, 1, 0x00, 0x7f, 10, 0x4b, 0x0f, 0x01, 0x2c, 0x02, 0x58, 0x03, 0x20, 0x02, 0x00, 0x96, 5, 0x01, 0x10, 0x04, 0x01, 0x04, 0xc8, 14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x07, 0x00, 0x01, 0x07, 0x04, 0x01, 0xd8, 1, 0x01, 0xdb, 2, 0x00, 0x01, 0xde, 7, 0x00, 0x01, 0x04, 0x04, 0x00, 0x00, 0x00, 0xe6, 4, 0x00, 0x00, 0x00, 0x01, 0xeb, 1, 0x00, 0xff, 1, 0x02, /* page 2 */ 0x22, 1, 0x00, 0xff, 1, 0x03, /* page 3 */ 0, LOAD_PAGE3, /* load the page 3 */ 0x11, 1, 0x01, 0xff, 1, 0x02, /* page 2 */ 0x13, 1, 0x00, 0x22, 4, 0x1f, 0xa4, 0xf0, 0x96, 0x27, 2, 0x14, 0x0c, 0x2a, 5, 0xc8, 0x00, 0x18, 0x12, 0x22, 0x64, 8, 0x00, 0x00, 0xf0, 0x01, 0x14, 0x44, 0x44, 0x44, 0x6e, 1, 0x08, 0xff, 1, 0x01, /* page 1 */ 0x78, 1, 0x00, 0, END_OF_SEQUENCE /* end of sequence */ }; #define SKIP 0xaa /* page 3 - the value SKIP says skip the index - see reg_w_page() */ static const u8 page3_7302[] = { 0x90, 0x40, 0x03, 0x00, 0xc0, 0x01, 0x14, 0x16, 0x14, 0x12, 0x00, 0x00, 0x00, 0x02, 0x33, 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x01, 0xb3, 0x01, 0x00, 0x00, 0x08, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x54, 0xf4, 0x02, 0x52, 0x54, 0xa4, 0xb8, 0xe0, 0x2a, 0xf6, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, 0xf2, 0x1f, 0x04, 0x00, 0x00, SKIP, 0x00, 0x00, 0xc0, 0xc0, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xff, 0x03, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc8, 0xc8, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x08, 0x10, 0x24, 0x40, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xfa, 0x00, 0x64, 0x5a, 0x28, 0x00, 0x00 }; static void reg_w_buf(struct gspca_dev *gspca_dev, u8 index, const u8 *buffer, int len) { int ret; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, buffer, len); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_w_buf failed i: %02x error %d\n", index, ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, u8 index, u8 value) { int ret; if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = value; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w() failed i: %02x v: %02x error %d\n", index, value, ret); gspca_dev->usb_err = ret; } } static void reg_w_seq(struct gspca_dev *gspca_dev, const u8 *seq, int len) { while (--len >= 0) { reg_w(gspca_dev, seq[0], seq[1]); seq += 2; } } /* load the beginning of a page */ static void reg_w_page(struct gspca_dev *gspca_dev, const u8 *page, int len) { int index; int ret = 0; if (gspca_dev->usb_err < 0) return; for (index = 0; index < len; index++) { if (page[index] == SKIP) /* skip this index */ continue; gspca_dev->usb_buf[0] = page[index]; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w_page() failed i: %02x v: %02x error %d\n", index, page[index], ret); gspca_dev->usb_err = ret; break; } } } /* output a variable sequence */ static void reg_w_var(struct gspca_dev *gspca_dev, const u8 *seq, const u8 *page3, unsigned int page3_len) { int index, len; for (;;) { index = *seq++; len = *seq++; switch (len) { case END_OF_SEQUENCE: return; case LOAD_PAGE3: reg_w_page(gspca_dev, page3, page3_len); break; default: if (len > USB_BUF_SZ) { gspca_err(gspca_dev, "Incorrect variable sequence\n"); return; } while (len > 0) { if (len < 8) { reg_w_buf(gspca_dev, index, seq, len); seq += len; break; } reg_w_buf(gspca_dev, index, seq, 8); seq += 8; index += 8; len -= 8; } } } /* not reached */ } /* this function is called at probe time for pac7302 */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; /* only 640x480 */ cam->nmodes = ARRAY_SIZE(vga_mode); sd->flags = id->driver_info; return 0; } static void setbrightcont(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, v; static const u8 max[10] = {0x29, 0x33, 0x42, 0x5a, 0x6e, 0x80, 0x9f, 0xbb, 0xd4, 0xec}; static const u8 delta[10] = {0x35, 0x33, 0x33, 0x2f, 0x2a, 0x25, 0x1e, 0x17, 0x11, 0x0b}; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ for (i = 0; i < 10; i++) { v = max[i]; v += (sd->brightness->val - (s32)sd->brightness->maximum) * 150 / (s32)sd->brightness->maximum; /* 200 ? 
*/ v -= delta[i] * sd->contrast->val / (s32)sd->contrast->maximum; if (v < 0) v = 0; else if (v > 0xff) v = 0xff; reg_w(gspca_dev, 0xa2 + i, v); } reg_w(gspca_dev, 0xdc, 0x01); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, v; static const int a[9] = {217, -212, 0, -101, 170, -67, -38, -315, 355}; static const int b[9] = {19, 106, 0, 19, 106, 1, 19, 106, 1}; reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ reg_w(gspca_dev, 0x11, 0x01); reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ for (i = 0; i < 9; i++) { v = a[i] * sd->saturation->val / (s32)sd->saturation->maximum; v += b[i]; reg_w(gspca_dev, 0x0f + 2 * i, (v >> 8) & 0x07); reg_w(gspca_dev, 0x0f + 2 * i + 1, v); } reg_w(gspca_dev, 0xdc, 0x01); } static void setwhitebalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0xc6, sd->white_balance->val); reg_w(gspca_dev, 0xdc, 0x01); } static u8 rgbbalance_ctrl_to_reg_value(s32 rgb_ctrl_val) { const unsigned int k = 1000; /* precision factor */ unsigned int norm; /* Normed value [0...k] */ norm = k * (rgb_ctrl_val - PAC7302_RGB_BALANCE_MIN) / (PAC7302_RGB_BALANCE_MAX - PAC7302_RGB_BALANCE_MIN); /* Qudratic apporach improves control at small (register) values: */ return 64 * norm * norm / (k*k) + 32 * norm / k + 32; /* Y = 64*X*X + 32*X + 32 * => register values 0x20-0x80; Windows driver uses these limits */ /* NOTE: for full value range (0x00-0xff) use * Y = 254*X*X + X * => 254 * norm * norm / (k*k) + 1 * norm / k */ } static void setredbalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0x01, rgbbalance_ctrl_to_reg_value(sd->red_balance->val)); reg_w(gspca_dev, 0xdc, 0x01); } static void setbluebalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0x03, rgbbalance_ctrl_to_reg_value(sd->blue_balance->val)); reg_w(gspca_dev, 0xdc, 0x01); } static void setgain(struct gspca_dev *gspca_dev) { u8 reg10, reg12; if (gspca_dev->gain->val < 32) { reg10 = gspca_dev->gain->val; reg12 = 0; } else { reg10 = 31; reg12 = gspca_dev->gain->val - 31; } reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ reg_w(gspca_dev, 0x10, reg10); reg_w(gspca_dev, 0x12, reg12); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setexposure(struct gspca_dev *gspca_dev) { u8 clockdiv; u16 exposure; /* * Register 2 of frame 3 contains the clock divider configuring the * no fps according to the formula: 90 / reg. sd->exposure is the * desired exposure time in 0.5 ms. */ clockdiv = (90 * gspca_dev->exposure->val + 1999) / 2000; /* * Note clockdiv = 3 also works, but when running at 30 fps, depending * on the scene being recorded, the camera switches to another * quantization table for certain JPEG blocks, and we don't know how * to decompress these blocks. So we cap the framerate at 15 fps. */ if (clockdiv < 6) clockdiv = 6; else if (clockdiv > 63) clockdiv = 63; /* * Register 2 MUST be a multiple of 3, except when between 6 and 12? * Always round up, otherwise we cannot get the desired frametime * using the partial frame time exposure control. 
*/ if (clockdiv < 6 || clockdiv > 12) clockdiv = ((clockdiv + 2) / 3) * 3; /* * frame exposure time in ms = 1000 * clockdiv / 90 -> * exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90) */ exposure = (gspca_dev->exposure->val * 45 * 448) / (1000 * clockdiv); /* 0 = use full frametime, 448 = no exposure, reverse it */ exposure = 448 - exposure; reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ reg_w(gspca_dev, 0x02, clockdiv); reg_w(gspca_dev, 0x0e, exposure & 0xff); reg_w(gspca_dev, 0x0f, exposure >> 8); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void sethvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 data, hflip, vflip; hflip = sd->hflip->val; if (sd->flags & FL_HFLIP) hflip = !hflip; vflip = sd->vflip->val; if (sd->flags & FL_VFLIP) vflip = !vflip; reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ data = (hflip ? 0x08 : 0x00) | (vflip ? 0x04 : 0x00); reg_w(gspca_dev, 0x21, data); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0xb6, sd->sharpness->val); reg_w(gspca_dev, 0xdc, 0x01); } /* this function is called at probe and resume time for pac7302 */ static int sd_init(struct gspca_dev *gspca_dev) { reg_w_seq(gspca_dev, init_7302, sizeof(init_7302)/2); return gspca_dev->usb_err; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) { /* when switching to autogain set defaults to make sure we are on a valid point of the autogain gain / exposure knee graph, and give this change time to take effect before doing autogain. 
*/ gspca_dev->exposure->val = PAC7302_EXPOSURE_DEFAULT; gspca_dev->gain->val = PAC7302_GAIN_DEFAULT; sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightcont(gspca_dev); break; case V4L2_CID_SATURATION: setcolors(gspca_dev); break; case V4L2_CID_WHITE_BALANCE_TEMPERATURE: setwhitebalance(gspca_dev); break; case V4L2_CID_RED_BALANCE: setredbalance(gspca_dev); break; case V4L2_CID_BLUE_BALANCE: setbluebalance(gspca_dev); break; case V4L2_CID_AUTOGAIN: if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val)) setexposure(gspca_dev); if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val)) setgain(gspca_dev); break; case V4L2_CID_HFLIP: sethvflip(gspca_dev); break; case V4L2_CID_SHARPNESS: setsharpness(gspca_dev); break; default: return -EINVAL; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; /* this function is called at probe time */ static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 12); sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 32, 1, 16); sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 127); sd->white_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE, 0, 255, 1, 55); sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, PAC7302_RGB_BALANCE_MIN, PAC7302_RGB_BALANCE_MAX, 1, PAC7302_RGB_BALANCE_DEFAULT); sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, PAC7302_RGB_BALANCE_MIN, PAC7302_RGB_BALANCE_MAX, 1, PAC7302_RGB_BALANCE_DEFAULT); gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 1023, 1, PAC7302_EXPOSURE_DEFAULT); gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 62, 1, PAC7302_GAIN_DEFAULT); sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 15, 1, 8); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->brightness); v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); v4l2_ctrl_cluster(2, &sd->hflip); return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w_var(gspca_dev, start_7302, page3_7302, sizeof(page3_7302)); sd->sof_read = 0; sd->autogain_ignore_frames = 0; atomic_set(&sd->avg_lum, 270 + sd->brightness->val); /* start stream */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x01); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { /* stop stream */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x00); } /* called on streamoff with alt 0 and on disconnect for pac7302 */ static void sd_stop0(struct gspca_dev *gspca_dev) { if (!gspca_dev->present) return; reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x40); } static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) 
gspca_dev; int avg_lum = atomic_read(&sd->avg_lum); int desired_lum; const int deadzone = 30; if (sd->autogain_ignore_frames < 0) return; if (sd->autogain_ignore_frames > 0) { sd->autogain_ignore_frames--; } else { desired_lum = 270 + sd->brightness->val; if (gspca_expo_autogain(gspca_dev, avg_lum, desired_lum, deadzone, PAC7302_GAIN_KNEE, PAC7302_EXPOSURE_KNEE)) sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } } /* JPEG header */ static const u8 jpeg_header[] = { 0xff, 0xd8, /* SOI: Start of Image */ 0xff, 0xc0, /* SOF0: Start of Frame (Baseline DCT) */ 0x00, 0x11, /* length = 17 bytes (including this length field) */ 0x08, /* Precision: 8 */ 0x02, 0x80, /* height = 640 (image rotated) */ 0x01, 0xe0, /* width = 480 */ 0x03, /* Number of image components: 3 */ 0x01, 0x21, 0x00, /* ID=1, Subsampling 1x1, Quantization table: 0 */ 0x02, 0x11, 0x01, /* ID=2, Subsampling 2x1, Quantization table: 1 */ 0x03, 0x11, 0x01, /* ID=3, Subsampling 2x1, Quantization table: 1 */ 0xff, 0xda, /* SOS: Start Of Scan */ 0x00, 0x0c, /* length = 12 bytes (including this length field) */ 0x03, /* number of components: 3 */ 0x01, 0x00, /* selector 1, table 0x00 */ 0x02, 0x11, /* selector 2, table 0x11 */ 0x03, 0x11, /* selector 3, table 0x11 */ 0x00, 0x3f, /* Spectral selection: 0 .. 63 */ 0x00 /* Successive approximation: 0 */ }; /* this function is run at interrupt level */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; u8 *image; u8 *sof; sof = pac_find_sof(gspca_dev, &sd->sof_read, data, len); if (sof) { int n, lum_offset, footer_length; /* * 6 bytes after the FF D9 EOF marker a number of lumination * bytes are send corresponding to different parts of the * image, the 14th and 15th byte after the EOF seem to * correspond to the center of the image. */ lum_offset = 61 + sizeof pac_sof_marker; footer_length = 74; /* Finish decoding current frame */ n = (sof - data) - (footer_length + sizeof pac_sof_marker); if (n < 0) { gspca_dev->image_len += n; } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, n); } image = gspca_dev->image; if (image != NULL && image[gspca_dev->image_len - 2] == 0xff && image[gspca_dev->image_len - 1] == 0xd9) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); n = sof - data; len -= n; data = sof; /* Get average lumination */ if (gspca_dev->last_packet_type == LAST_PACKET && n >= lum_offset) atomic_set(&sd->avg_lum, data[-lum_offset] + data[-lum_offset + 1]); /* Start the new frame with the jpeg header */ /* The PAC7302 has the image rotated 90 degrees */ gspca_frame_add(gspca_dev, FIRST_PACKET, jpeg_header, sizeof jpeg_header); } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int sd_dbg_s_register(struct gspca_dev *gspca_dev, const struct v4l2_dbg_register *reg) { u8 index; u8 value; /* * reg->reg: bit0..15: reserved for register index (wIndex is 16bit * long on the USB bus) */ if (reg->match.addr == 0 && (reg->reg < 0x000000ff) && (reg->val <= 0x000000ff) ) { /* Currently writing to page 0 is only supported. */ /* reg_w() only supports 8bit index */ index = reg->reg; value = reg->val; /* * Note that there shall be no access to other page * by any other function between the page switch and * the actual register write. 
*/ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, index, value); reg_w(gspca_dev, 0xdc, 0x01); } return gspca_dev->usb_err; } #endif #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; u8 data0, data1; if (len == 2) { data0 = data[0]; data1 = data[1]; if ((data0 == 0x00 && data1 == 0x11) || (data0 == 0x22 && data1 == 0x33) || (data0 == 0x44 && data1 == 0x55) || (data0 == 0x66 && data1 == 0x77) || (data0 == 0x88 && data1 == 0x99) || (data0 == 0xaa && data1 == 0xbb) || (data0 == 0xcc && data1 == 0xdd) || (data0 == 0xee && data1 == 0xff)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } } return ret; } #endif /* sub-driver description for pac7302 */ static const struct sd_desc sd_desc = { .name = KBUILD_MODNAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, #ifdef CONFIG_VIDEO_ADV_DEBUG .set_register = sd_dbg_s_register, #endif #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06f8, 0x3009)}, {USB_DEVICE(0x06f8, 0x301b)}, {USB_DEVICE(0x093a, 0x2620)}, {USB_DEVICE(0x093a, 0x2621)}, {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2625)}, {USB_DEVICE(0x093a, 0x2626)}, {USB_DEVICE(0x093a, 0x2627), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2628)}, {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x262a)}, {USB_DEVICE(0x093a, 0x262c)}, {USB_DEVICE(0x145f, 0x013c)}, {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = KBUILD_MODNAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
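/*
 * Editor's note: a small standalone sketch (not part of the driver) of the
 * register arithmetic described above and used by setexposure(): the clock
 * divider in register 0x02 of page 3 is derived from fps = 90 / val, forced
 * to a multiple of 3 outside the 6..12 range, and the 9-bit exposure value
 * counts down from 448 (448 = no exposure, 0 = full frame time).  The helper
 * name pac7302_exposure_regs() is illustrative only.
 */
#include <stdio.h>

static void pac7302_exposure_regs(int exposure_half_ms, int *clockdiv, int *expo)
{
	int div = (90 * exposure_half_ms + 1999) / 2000;

	if (div < 6)
		div = 6;			/* cap the frame rate at 15 fps */
	else if (div > 63)
		div = 63;
	if (div < 6 || div > 12)
		div = ((div + 2) / 3) * 3;	/* round up to a multiple of 3 */

	*clockdiv = div;
	*expo = 448 - (exposure_half_ms * 45 * 448) / (1000 * div);
}

int main(void)
{
	int div, expo;

	/* PAC7302_EXPOSURE_DEFAULT = 66 units of 0.5 ms (33 ms / 30 fps) */
	pac7302_exposure_regs(66, &div, &expo);
	printf("reg 0x02 = %d, regs 0x0e/0x0f = %d\n", div, expo);
	return 0;
}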
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/module.h>
#include <linux/usb.h>
#include "main.h"
#include "rtw8723d.h"
#include "usb.h"

static const struct usb_device_id rtw_8723du_id_table[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xd723,
					0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8723d_hw_spec) }, /* 8723DU 1*1 */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd611, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8723d_hw_spec) }, /* Edimax EW-7611ULB V2 */
	{ },
};
MODULE_DEVICE_TABLE(usb, rtw_8723du_id_table);

static int rtw8723du_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	return rtw_usb_probe(intf, id);
}

static struct usb_driver rtw_8723du_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtw_8723du_id_table,
	.probe = rtw8723du_probe,
	.disconnect = rtw_usb_disconnect,
};
module_usb_driver(rtw_8723du_driver);

MODULE_AUTHOR("Hans Ulli Kroll <linux@ulli-kroll.de>");
MODULE_DESCRIPTION("Realtek 802.11n wireless 8723du driver");
MODULE_LICENSE("Dual BSD/GPL");
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/kernel.h>
#include <linux/nospec.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/wordpart.h>

/* out-of-line parts */

#if !defined(INLINE_COPY_FROM_USER)
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return _inline_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(_copy_from_user);
#endif

#if !defined(INLINE_COPY_TO_USER)
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return _inline_copy_to_user(to, from, n);
}
EXPORT_SYMBOL(_copy_to_user);
#endif

/**
 * check_zeroed_user: check if a userspace buffer only contains zero bytes
 * @from: Source address, in userspace.
 * @size: Size of buffer.
 *
 * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
 * userspace addresses (and is more efficient because we don't care where the
 * first non-zero byte is).
 *
 * Returns:
 *  * 0: There were non-zero bytes present in the buffer.
 *  * 1: The buffer was full of zero bytes.
 *  * -EFAULT: access to userspace failed.
 */
int check_zeroed_user(const void __user *from, size_t size)
{
	unsigned long val;
	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);

	if (unlikely(size == 0))
		return 1;

	from -= align;
	size += align;

	if (!user_read_access_begin(from, size))
		return -EFAULT;

	unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	if (align)
		val &= ~aligned_byte_mask(align);

	while (size > sizeof(unsigned long)) {
		if (unlikely(val))
			goto done;

		from += sizeof(unsigned long);
		size -= sizeof(unsigned long);

		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	}

	if (size < sizeof(unsigned long))
		val &= aligned_byte_mask(size);

done:
	user_read_access_end();
	return (val == 0);

err_fault:
	user_read_access_end();
	return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);
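/*
 * Editor's note: a standalone, little-endian-only sketch (not kernel code) of
 * the word-at-a-time masking scheme check_zeroed_user() uses: round the start
 * down to a word boundary, mask away the bytes before the start in the first
 * word and the bytes past the end in the last word, and report whether
 * anything non-zero remains.  Like the kernel code, it reads the containing
 * words, so the demo carves its range out of an aligned array to keep every
 * access in bounds.  check_zeroed() and byte_mask() are illustrative names.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* low "n" bytes set, matching the little-endian aligned_byte_mask() */
#define byte_mask(n)	((1ul << 8 * (n)) - 1)

static int check_zeroed(const void *from, size_t size)
{
	const unsigned char *p = from;
	uintptr_t align = (uintptr_t)from % sizeof(unsigned long);
	unsigned long val;

	if (size == 0)
		return 1;

	p -= align;				/* round down to a word boundary */
	size += align;

	memcpy(&val, p, sizeof(val));		/* stands in for unsafe_get_user() */
	if (align)
		val &= ~byte_mask(align);	/* ignore bytes before "from" */

	while (size > sizeof(unsigned long)) {
		if (val)
			return 0;
		p += sizeof(unsigned long);
		size -= sizeof(unsigned long);
		memcpy(&val, p, sizeof(val));
	}

	if (size < sizeof(unsigned long))
		val &= byte_mask(size);		/* ignore bytes past the end */

	return val == 0;
}

/* Demo: a 20-byte range starting 3 bytes into an aligned, zeroed block. */
static unsigned long storage[4];

int check_zeroed_demo(void)
{
	return check_zeroed((char *)storage + 3, 20);	/* -> 1 */
}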
3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 
3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 
4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 
5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 
5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 
6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 
7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 
8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 
8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 
9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 
10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 10164 10165 10166 10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178 10179 10180 10181 10182 10183 10184 10185 10186 10187 10188 10189 10190 10191 10192 10193 10194 10195 10196 10197 10198 10199 10200 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10243 10244 10245 10246 10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259 10260 10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274 10275 10276 10277 10278 10279 10280 10281 10282 10283 10284 10285 10286 10287 10288 10289 10290 10291 10292 10293 10294 10295 10296 10297 10298 10299 10300 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10343 10344 10345 10346 10347 10348 10349 10350 10351 10352 10353 10354 10355 10356 10357 10358 10359 10360 10361 10362 10363 10364 10365 10366 10367 10368 10369 10370 10371 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10382 10383 10384 10385 10386 10387 10388 10389 10390 10391 10392 10393 10394 10395 10396 10397 10398 10399 10400 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10443 10444 10445 10446 10447 10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10482 10483 10484 10485 10486 10487 10488 10489 10490 10491 10492 10493 10494 10495 10496 10497 10498 10499 10500 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557 10558 10559 10560 10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571 10572 10573 10574 10575 10576 10577 10578 10579 10580 10581 10582 10583 10584 10585 10586 10587 10588 10589 10590 10591 10592 10593 10594 10595 10596 10597 10598 10599 10600 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649 10650 10651 10652 10653 10654 10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680 10681 10682 10683 10684 10685 10686 10687 10688 10689 10690 10691 10692 10693 10694 10695 10696 10697 10698 10699 10700 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 
10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 10743 10744 10745 10746 10747 10748 10749 10750 10751 10752 10753 10754 10755 10756 10757 10758 10759 10760 10761 10762 10763 10764 10765 10766 10767 10768 10769 10770 10771 10772 10773 10774 10775 10776 10777 10778 10779 10780 10781 10782 10783 10784 10785 10786 10787 10788 10789 10790 10791 10792 10793 10794 10795 10796 10797 10798 10799 10800 10801 10802 10803 10804 10805 10806 10807 10808 10809 10810 10811 10812 10813 10814 10815 10816 10817 10818 10819 10820 10821 10822 10823 10824 10825 10826 10827 10828 10829 10830 10831 10832 10833 10834 10835 10836 10837 10838 10839 10840 10841 10842 10843 10844 10845 10846 10847 10848 10849 10850 10851 10852 10853 10854 10855 10856 10857 10858 10859 10860 10861 10862 10863 10864 10865 10866 10867 10868 10869 10870 10871 10872 10873 10874 10875 10876 10877 10878 10879 10880 10881 10882 10883 10884 10885 10886 10887 10888 10889 10890 10891 10892 10893 10894 10895 10896 10897 10898 10899 10900 10901 10902 10903 10904 10905 10906 10907 10908 10909 10910 10911 10912 10913 10914 10915 10916 10917 10918 10919 10920 10921 10922 10923 10924 10925 10926 10927 10928 10929 10930 10931 10932 10933 10934 10935 10936 10937 10938 10939 10940 10941 10942 10943 10944 10945 10946 10947 10948 10949 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962 10963 10964 10965 10966 10967 10968 10969 10970 10971 10972 10973 10974 10975 10976 10977 10978 10979 10980 10981 10982 10983 10984 10985 10986 10987 10988 10989 10990 10991 10992 10993 10994 10995 10996 10997 10998 10999 11000 11001 11002 11003 11004 11005 11006 11007 11008 11009 11010 11011 11012 11013 11014 11015 11016 11017 11018 11019 11020 11021 11022 11023 11024 11025 11026 11027 11028 11029 11030 11031 11032 11033 11034 11035 11036 11037 11038 11039 11040 11041 11042 11043 11044 11045 11046 11047 11048 11049 11050 11051 11052 11053 11054 11055 11056 11057 11058 11059 11060 11061 11062 11063 11064 11065 11066 11067 11068 11069 11070 11071 11072 11073 11074 11075 11076 11077 11078 11079 11080 11081 11082 11083 11084 11085 11086 11087 11088 11089 11090 11091 11092 11093 11094 11095 11096 11097 11098 11099 11100 11101 11102 11103 11104 11105 11106 11107 11108 11109 11110 11111 11112 11113 11114 11115 11116 11117 11118 11119 11120 11121 11122 11123 11124 11125 11126 11127 11128 11129 11130 11131 11132 11133 11134 11135 11136 11137 11138 11139 11140 11141 11142 11143 11144 11145 11146 11147 11148 11149 11150 11151 11152 11153 11154 11155 11156 11157 11158 11159 11160 11161 11162 11163 11164 11165 11166 11167 11168 11169 11170 11171 11172 11173 11174 11175 11176 11177 11178 11179 11180 11181 11182 11183 11184 11185 11186 11187 11188 11189 11190 11191 11192 11193 11194 11195 11196 11197 11198 11199 11200 11201 11202 11203 11204 11205 11206 11207 11208 11209 11210 11211 11212 11213 11214 11215 11216 11217 11218 11219 11220 11221 11222 11223 11224 11225 11226 11227 11228 11229 11230 11231 11232 11233 11234 11235 11236 11237 11238 11239 11240 11241 11242 11243 11244 11245 11246 11247 11248 11249 11250 11251 11252 11253 11254 11255 11256 11257 11258 11259 11260 11261 11262 11263 11264 11265 11266 11267 11268 11269 11270 11271 11272 11273 11274 11275 11276 11277 11278 11279 11280 11281 11282 11283 11284 11285 11286 11287 11288 11289 11290 11291 11292 11293 11294 11295 11296 11297 11298 11299 11300 11301 11302 11303 11304 11305 11306 11307 11308 11309 11310 11311 11312 11313 11314 11315 11316 11317 11318 11319 11320 11321 11322 11323 
11324 11325 11326 11327 11328 11329 11330 11331 11332 11333 11334 11335 11336 11337 11338 11339 11340 11341 11342 11343 11344 11345 11346 11347 11348 11349 11350 11351 11352 11353 11354 11355 11356 11357 11358 11359 11360 11361 11362 11363 11364 11365 11366 11367 11368 11369 11370 11371 11372 11373 11374 11375 11376 11377 11378 11379 11380 11381 11382 11383 11384 11385 11386 11387 11388 11389 11390 11391 11392 11393 11394 11395 11396 11397 11398 11399 11400 11401 11402 11403 11404 11405 11406 11407 11408 11409 11410 11411 11412 11413 11414 11415 11416 11417 11418 11419 11420 11421 11422 11423 11424 11425 11426 11427 11428 11429 11430 11431 11432 11433 11434 11435 11436 11437 11438 11439 11440 11441 11442 11443 11444 11445 11446 11447 11448 11449 11450 11451 11452 11453 11454 11455 11456 11457 11458 11459 11460 11461 11462 11463 11464 11465 11466 11467 11468 11469 11470 11471 11472 11473 11474 11475 11476 11477 11478 11479 11480 11481 11482 11483 11484 11485 11486 11487 11488 11489 11490 11491 11492 11493 11494 11495 11496 11497 11498 11499 11500 11501 11502 11503 11504 11505 11506 11507 11508 11509 11510 11511 11512 11513 11514 11515 11516 11517 11518 11519 11520 11521 11522 11523 11524 11525 11526 11527 11528 11529 11530 11531 11532 11533 11534 11535 11536 11537 11538 11539 11540 11541 11542 11543 11544 11545 11546 11547 11548 11549 11550 11551 11552 11553 11554 11555 11556 11557 11558 11559 11560 11561 11562 11563 11564 11565 11566 11567 11568 11569 11570 11571 11572 11573 11574 11575 11576 11577 11578 11579 11580 11581 11582 11583 11584 11585 11586 11587 11588 11589 11590 11591 11592 11593 11594 11595 11596 11597 11598 11599 11600 11601 11602 11603 11604 11605 11606 11607 11608 11609 11610 11611 11612 11613 11614 11615 11616 11617 11618 11619 11620 11621 11622 11623 11624 11625 11626 11627 11628 11629 11630 11631 11632 11633 11634 11635 11636 11637 11638 11639 11640 11641 11642 11643 11644 11645 11646 11647 11648 11649 11650 11651 11652 11653 11654 11655 11656 11657 11658 11659 11660 11661 11662 11663 11664 11665 11666 11667 11668 11669 11670 11671 11672 11673 11674 11675 11676 11677 11678 11679 11680 11681 11682 11683 11684 11685 11686 11687 11688 11689 11690 11691 11692 11693 11694 11695 11696 11697 11698 11699 11700 11701 11702 11703 11704 11705 11706 11707 11708 11709 11710 11711 11712 11713 11714 11715 11716 11717 11718 11719 11720 11721 11722 11723 11724 11725 11726 11727 11728 11729 11730 11731 11732 11733 11734 11735 11736 11737 11738 11739 11740 11741 11742 11743 11744 11745 11746 11747 11748 11749 11750 11751 11752 11753 11754 11755 11756 11757 11758 11759 11760 11761 11762 11763 11764 11765 11766 11767 11768 11769 11770 11771 11772 11773 11774 11775 11776 11777 11778 11779 11780 11781 11782 11783 11784 11785 11786 11787 11788 11789 11790 11791 11792 11793 11794 11795 11796 11797 11798 11799 11800 11801 11802 11803 11804 11805 11806 11807 11808 11809 11810 11811 11812 11813 11814 11815 11816 11817 11818 11819 11820 11821 11822 11823 11824 11825 11826 11827 11828 11829 11830 11831 11832 11833 11834 11835 11836 11837 11838 11839 11840 11841 11842 11843 11844 11845 11846 11847 11848 11849 11850 11851 11852 11853 11854 11855 11856 11857 11858 11859 11860 11861 11862 11863 11864 11865 11866 11867 11868 11869 11870 11871 11872 11873 11874 11875 11876 11877 11878 11879 11880 11881 11882 11883 11884 11885 11886 11887 11888 11889 11890 11891 11892 11893 11894 11895 11896 11897 11898 11899 11900 11901 11902 11903 11904 11905 11906 11907 11908 11909 11910 11911 11912 11913 11914 11915 
11916 11917 11918 11919 11920 11921 11922 11923 11924 11925 11926 11927 11928 11929 11930 11931 11932 11933 11934 11935 11936 11937 11938 11939 11940 11941 11942 11943 11944 11945 11946 11947 11948 11949 11950 11951 11952 11953 11954 11955 11956 11957 11958 11959 11960 11961 11962 11963 11964 11965 11966 11967 11968 11969 11970 11971 11972 11973 11974 11975 11976 11977 11978 11979 11980 11981 11982 11983 11984 11985 11986 11987 11988 11989 11990 11991 11992 11993 11994 11995 11996 11997 11998 11999 12000 12001 12002 12003 12004 12005 12006 12007 12008 12009 12010 12011 12012 12013 12014 12015 12016 12017 12018 12019 12020 12021 12022 12023 12024 12025 12026 12027 12028 12029 12030 12031 12032 12033 12034 12035 12036 12037 12038 12039 12040 12041 12042 12043 12044 12045 12046 12047 12048 12049 12050 12051 12052 12053 12054 12055 12056 12057 12058 12059 12060 12061 12062 12063 12064 12065 12066 12067 12068 12069 12070 12071 12072 12073 12074 12075 12076 12077 12078 12079 12080 12081 12082 12083 12084 12085 12086 12087 12088 12089 12090 12091 12092 12093 12094 12095 12096 12097 12098 12099 12100 12101 12102 12103 12104 12105 12106 12107 12108 12109 12110 12111 12112 12113 12114 12115 12116 12117 12118 12119 12120 12121 12122 12123 12124 12125 12126 12127 12128 12129 12130 12131 12132 12133 12134 12135 12136 12137 12138 12139 12140 12141 12142 12143 12144 12145 12146 12147 12148 12149 12150 12151 12152 12153 12154 12155 12156 12157 12158 12159 12160 12161 12162 12163 12164 12165 12166 12167 12168 12169 12170 12171 12172 12173 12174 12175 12176 12177 12178 12179 12180 12181 12182 12183 12184 12185 12186 12187 12188 12189 12190 12191 12192 12193 12194 12195 12196 12197 12198 12199 12200 12201 12202 12203 12204 12205 12206 12207 12208 12209 12210 12211 12212 12213 12214 12215 12216 12217 12218 12219 12220 12221 12222 12223 12224 12225 12226 12227 12228 12229 12230 12231 12232 12233 12234 12235 12236 12237 12238 12239 12240 12241 12242 12243 12244 12245 12246 12247 12248 12249 12250 12251 12252 12253 12254 12255 12256 12257 12258 12259 12260 12261 12262 12263 12264 12265 12266 12267 12268 12269 12270 12271 12272 12273 12274 12275 12276 12277 12278 12279 12280 12281 12282 12283 12284 12285 12286 12287 12288 12289 12290 12291 12292 12293 12294 12295 12296 12297 12298 12299 12300 12301 12302 12303 12304 12305 12306 12307 12308 12309 12310 12311 12312 12313 12314 12315 12316 12317 12318 12319 12320 12321 12322 12323 12324 12325 12326 12327 12328 12329 12330 12331 12332 12333 12334 12335 12336 12337 12338 12339 12340 12341 12342 12343 12344 12345 12346 12347 12348 12349 12350 12351 12352 12353 12354 12355 12356 12357 12358 12359 12360 12361 12362 12363 12364 12365 12366 12367 12368 12369 12370 12371 12372 12373 12374 12375 12376 12377 12378 12379 12380 12381 12382 12383 12384 12385 12386 12387 12388 12389 12390 12391 12392 12393 12394 12395 12396 12397 12398 12399 12400 12401 12402 12403 12404 12405 12406 12407 12408 12409 12410 12411 12412 12413 12414 12415 12416 12417 12418 12419 12420 12421 12422 12423 12424 12425 12426 12427 12428 12429 12430 12431 12432 12433 12434 12435 12436 12437 12438 12439 12440 12441 12442 12443 12444 12445 12446 12447 12448 12449 12450 12451 12452 12453 12454 12455 12456 12457 12458 12459 12460 12461 12462 12463 12464 12465 12466 12467 12468 12469 12470 12471 12472 12473 12474 12475 12476 12477 12478 12479 12480 12481 12482 12483 12484 12485 12486 12487 12488 12489 12490 12491 12492 12493 12494 12495 12496 12497 12498 12499 12500 12501 12502 12503 12504 12505 12506 12507 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/atomic.h>
#include <linux/bpf_verifier.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>
#include <net/mptcp.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netkit.h>
#include <linux/un.h>
#include <net/xdp_sock_drv.h>
#include <net/inet_dscp.h>

#include "dev.h"

/* Keep the struct bpf_fib_lookup small so that it fits into a cacheline */
static_assert(sizeof(struct bpf_fib_lookup) == 64,
	      "struct bpf_fib_lookup size check");

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
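/* Usage sketch (illustrative only, not part of the upstream file): this
 * helper exists so that setsockopt() handlers such as SO_ATTACH_FILTER can
 * read a struct sock_fprog through a sockptr_t while transparently coping
 * with 32-bit compat callers. Assuming the usual caller shape in
 * net/core/sock.c, it is used roughly like this:
 *
 *	struct sock_fprog fprog;
 *	int ret;
 *
 *	ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
 *	if (!ret)
 *		ret = sk_attach_filter(&fprog, sk);
 *
 * Treat the snippet as a sketch of the calling convention, not as the exact
 * upstream code.
 */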
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *	@reason: record drop reason on errors (negative return value)
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to bpf_prog_run. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
		       enum skb_drop_reason *reason)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		*reason = SKB_DROP_REASON_PFMEMALLOC;
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err) {
		*reason = SKB_DROP_REASON_SOCKET_FILTER;
		return err;
	}

	err = security_sock_rcv_skb(sk, skb);
	if (err) {
		*reason = SKB_DROP_REASON_SECURITY_HOOK;
		return err;
	}

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
		if (err)
			*reason = SKB_DROP_REASON_SOCKET_FILTER;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
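/* Usage sketch (illustrative only, not part of the upstream file): callers
 * that do not care about trimming normally go through the sk_filter()
 * wrapper from <linux/filter.h>, which passes cap == 1 so the skb is never
 * trimmed below one byte, roughly along these lines:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		enum skb_drop_reason reason;
 *
 *		return sk_filter_trim_cap(sk, skb, 1, &reason);
 *	}
 *
 * The exact wrapper differs across kernel versions; this is meant only to
 * illustrate the role of the @cap parameter.
 */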
sizeof(tmp); offset = bpf_skb_load_helper_convert_offset(skb, offset); if (offset == INT_MIN) return -EFAULT; if (headlen - offset >= len) return get_unaligned_be16(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return be16_to_cpu(tmp); else return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, int, offset) { return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, offset); } BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { __be32 tmp; const int len = sizeof(tmp); offset = bpf_skb_load_helper_convert_offset(skb, offset); if (offset == INT_MIN) return -EFAULT; if (headlen - offset >= len) return get_unaligned_be32(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return be32_to_cpu(tmp); else return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, int, offset) { return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, offset); } static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; switch (skb_field) { case SKF_AD_MARK: BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, offsetof(struct sk_buff, mark)); break; case SKF_AD_PKTTYPE: *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); #endif break; case SKF_AD_QUEUE: BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2); *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, queue_mapping)); break; case SKF_AD_VLAN_TAG: BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, vlan_tci)); break; case SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_all) != 4); *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, offsetof(struct sk_buff, vlan_all)); *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); *insn++ = BPF_ALU32_IMM(BPF_MOV, dst_reg, 1); break; } return insn - insn_buf; } static bool convert_bpf_extensions(struct sock_filter *fp, struct bpf_insn **insnp) { struct bpf_insn *insn = *insnp; u32 cnt; switch (fp->k) { case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2); /* A = *(u16 *) (CTX + offsetof(protocol)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, protocol)); /* A = ntohs(A) [emitting a nop or swap16] */ *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break; case SKF_AD_OFF + SKF_AD_PKTTYPE: cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), BPF_REG_TMP, BPF_REG_CTX, offsetof(struct sk_buff, dev)); /* if (tmp != 0) goto pc + 1 */ *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); *insn++ = BPF_EXIT_INSN(); if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, offsetof(struct net_device, ifindex)); else *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, offsetof(struct net_device, type)); 
break; case SKF_AD_OFF + SKF_AD_MARK: cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_RXHASH: BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, hash)); break; case SKF_AD_OFF + SKF_AD_QUEUE: cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TAG: cnt = convert_skb_access(SKF_AD_VLAN_TAG, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TPID: BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2); /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, vlan_proto)); /* A = ntohs(A) [emitting a nop or swap16] */ *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break; case SKF_AD_OFF + SKF_AD_PAY_OFFSET: case SKF_AD_OFF + SKF_AD_NLATTR: case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = CTX */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg2 = A */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); /* arg3 = X */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); /* Emit call(arg1=CTX, arg2=A, arg3=X) */ switch (fp->k) { case SKF_AD_OFF + SKF_AD_PAY_OFFSET: *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); break; case SKF_AD_OFF + SKF_AD_NLATTR: *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); break; case SKF_AD_OFF + SKF_AD_NLATTR_NEST: *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); break; case SKF_AD_OFF + SKF_AD_CPU: *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); break; case SKF_AD_OFF + SKF_AD_RANDOM: *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); bpf_user_rnd_init_once(); break; } break; case SKF_AD_OFF + SKF_AD_ALU_XOR_X: /* A ^= X */ *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); break; default: /* This is just a dummy call to avoid letting the compiler * evict __bpf_call_base() as an optimization. Placed here * where no-one bothers. 
*/ BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0); return false; } *insnp = insn; return true; } static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) { const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); bool endian = BPF_SIZE(fp->code) == BPF_H || BPF_SIZE(fp->code) == BPF_W; bool indirect = BPF_MODE(fp->code) == BPF_IND; const int ip_align = NET_IP_ALIGN; struct bpf_insn *insn = *insnp; int offset = fp->k; if (!indirect && ((unaligned_ok && offset >= 0) || (!unaligned_ok && offset >= 0 && offset + ip_align >= 0 && offset + ip_align % size == 0))) { bool ldx_off_ok = offset <= S16_MAX; *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); if (offset) *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian + (!ldx_off_ok * 2)); if (ldx_off_ok) { *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, offset); } else { *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_TMP, 0); } if (endian) *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); *insn++ = BPF_JMP_A(8); } *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); } else { *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); if (fp->k) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); } switch (BPF_SIZE(fp->code)) { case BPF_B: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); break; case BPF_H: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); break; case BPF_W: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); break; default: return false; } *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *insn = BPF_EXIT_INSN(); *insnp = insn; return true; } /** * bpf_convert_filter - convert filter program * @prog: the user passed filter program * @len: the length of the user passed filter program * @new_prog: allocated 'struct bpf_prog' or NULL * @new_len: pointer to store length of converted program * @seen_ld_abs: bool whether we've seen ld_abs/ind * * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' * style extended BPF (eBPF). * Conversion workflow: * * 1) First pass for calculating the new program length: * bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs) * * 2) 2nd pass to remap in two passes: 1st pass finds new * jump offsets, 2nd pass remapping: * bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs) */ static int bpf_convert_filter(struct sock_filter *prog, int len, struct bpf_prog *new_prog, int *new_len, bool *seen_ld_abs) { int new_flen = 0, pass = 0, target, i, stack_off; struct bpf_insn *new_insn, *first_insn = NULL; struct sock_filter *fp; int *addrs = NULL; u8 bpf_src; BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); if (len <= 0 || len > BPF_MAXINSNS) return -EINVAL; if (new_prog) { first_insn = new_prog->insnsi; addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL | __GFP_NOWARN); if (!addrs) return -ENOMEM; } do_pass: new_insn = first_insn; fp = prog; /* Classic BPF related prologue emission. */ if (new_prog) { /* Classic BPF expects A and X to be reset first. These need * to be guaranteed to be the first two instructions. 
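		 *
		 * Roughly, the prologue emitted below behaves like this
		 * pseudo-code (a reading aid only, not additional emitted
		 * instructions):
		 *
		 *   A = 0; X = 0; CTX = arg1;
		 *   if (seen_ld_abs) { D = skb->data; H = skb->len - skb->data_len; }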
*/ *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X); /* All programs must keep CTX in callee saved BPF_REG_CTX. * In eBPF case it's done by the compiler, here we need to * do this ourself. Initial CTX is present in BPF_REG_ARG1. */ *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); if (*seen_ld_abs) { /* For packet access in classic BPF, cache skb->data * in callee-saved BPF R8 and skb->len - skb->data_len * (headlen) in BPF R9. Since classic BPF is read-only * on CTX, we only need to cache it once. */ *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), BPF_REG_D, BPF_REG_CTX, offsetof(struct sk_buff, data)); *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX, offsetof(struct sk_buff, len)); *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX, offsetof(struct sk_buff, data_len)); *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP); } } else { new_insn += 3; } for (i = 0; i < len; fp++, i++) { struct bpf_insn tmp_insns[32] = { }; struct bpf_insn *insn = tmp_insns; if (addrs) addrs[i] = new_insn - first_insn; switch (fp->code) { /* All arithmetic insns and skb loads map as-is. */ case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU | BPF_ADD | BPF_K: case BPF_ALU | BPF_SUB | BPF_X: case BPF_ALU | BPF_SUB | BPF_K: case BPF_ALU | BPF_AND | BPF_X: case BPF_ALU | BPF_AND | BPF_K: case BPF_ALU | BPF_OR | BPF_X: case BPF_ALU | BPF_OR | BPF_K: case BPF_ALU | BPF_LSH | BPF_X: case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_X: case BPF_ALU | BPF_RSH | BPF_K: case BPF_ALU | BPF_XOR | BPF_X: case BPF_ALU | BPF_XOR | BPF_K: case BPF_ALU | BPF_MUL | BPF_X: case BPF_ALU | BPF_MUL | BPF_K: case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_NEG: case BPF_LD | BPF_ABS | BPF_W: case BPF_LD | BPF_ABS | BPF_H: case BPF_LD | BPF_ABS | BPF_B: case BPF_LD | BPF_IND | BPF_W: case BPF_LD | BPF_IND | BPF_H: case BPF_LD | BPF_IND | BPF_B: /* Check for overloaded BPF extension and * directly convert it if found, otherwise * just move on with mapping. */ if (BPF_CLASS(fp->code) == BPF_LD && BPF_MODE(fp->code) == BPF_ABS && convert_bpf_extensions(fp, &insn)) break; if (BPF_CLASS(fp->code) == BPF_LD && convert_bpf_ld_abs(fp, &insn)) { *seen_ld_abs = true; break; } if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || fp->code == (BPF_ALU | BPF_MOD | BPF_X)) { *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); /* Error with exception code on div/mod by 0. * For cBPF programs, this was always return 0. */ *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2); *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *insn++ = BPF_EXIT_INSN(); } *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); break; /* Jump transformation cannot use BPF block macros * everywhere as offset calculation and target updates * require a bit more work than the rest, i.e. jump * opcodes map as-is, but offsets need adjustment. */ #define BPF_EMIT_JMP \ do { \ const s32 off_min = S16_MIN, off_max = S16_MAX; \ s32 off; \ \ if (target >= len || target < 0) \ goto err; \ off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ /* Adjust pc relative offset for 2nd or 3rd insn. */ \ off -= insn - tmp_insns; \ /* Reject anything not fitting into insn->off. 
*/ \ if (off < off_min || off > off_max) \ goto err; \ insn->off = off; \ } while (0) case BPF_JMP | BPF_JA: target = i + fp->k + 1; insn->code = fp->code; BPF_EMIT_JMP; break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) { /* BPF immediates are signed, zero extend * immediate into tmp register and use it * in compare insn. */ *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); insn->dst_reg = BPF_REG_A; insn->src_reg = BPF_REG_TMP; bpf_src = BPF_X; } else { insn->dst_reg = BPF_REG_A; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; } /* Common case where 'jump_false' is next insn. */ if (fp->jf == 0) { insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; target = i + fp->jt + 1; BPF_EMIT_JMP; break; } /* Convert some jumps when 'jump_true' is next insn. */ if (fp->jt == 0) { switch (BPF_OP(fp->code)) { case BPF_JEQ: insn->code = BPF_JMP | BPF_JNE | bpf_src; break; case BPF_JGT: insn->code = BPF_JMP | BPF_JLE | bpf_src; break; case BPF_JGE: insn->code = BPF_JMP | BPF_JLT | bpf_src; break; default: goto jmp_rest; } target = i + fp->jf + 1; BPF_EMIT_JMP; break; } jmp_rest: /* Other jumps are mapped into two insns: Jxx and JA. */ target = i + fp->jt + 1; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; BPF_EMIT_JMP; insn++; insn->code = BPF_JMP | BPF_JA; target = i + fp->jf + 1; BPF_EMIT_JMP; break; /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */ case BPF_LDX | BPF_MSH | BPF_B: { struct sock_filter tmp = { .code = BPF_LD | BPF_ABS | BPF_B, .k = fp->k, }; *seen_ld_abs = true; /* X = A */ *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); /* A = BPF_R0 = *(u8 *) (skb->data + K) */ convert_bpf_ld_abs(&tmp, &insn); insn++; /* A &= 0xf */ *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); /* A <<= 2 */ *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); /* tmp = X */ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X); /* X = A */ *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); /* A = tmp */ *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); break; } /* RET_K is remapped into 2 insns. RET_A case doesn't need an * extra mov as BPF_REG_0 is already mapped into BPF_REG_A. */ case BPF_RET | BPF_A: case BPF_RET | BPF_K: if (BPF_RVAL(fp->code) == BPF_K) *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0, 0, fp->k); *insn = BPF_EXIT_INSN(); break; /* Store to stack. */ case BPF_ST: case BPF_STX: stack_off = fp->k * 4 + 4; *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == BPF_ST ? BPF_REG_A : BPF_REG_X, -stack_off); /* check_load_and_stores() verifies that classic BPF can * load from stack only after write, so tracking * stack_depth for ST|STX insns is enough */ if (new_prog && new_prog->aux->stack_depth < stack_off) new_prog->aux->stack_depth = stack_off; break; /* Load from stack. */ case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: stack_off = fp->k * 4 + 4; *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, BPF_REG_FP, -stack_off); break; /* A = K or X = K */ case BPF_LD | BPF_IMM: case BPF_LDX | BPF_IMM: *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ? 
BPF_REG_A : BPF_REG_X, fp->k); break; /* X = A */ case BPF_MISC | BPF_TAX: *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); break; /* A = X */ case BPF_MISC | BPF_TXA: *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X); break; /* A = skb->len or X = skb->len */ case BPF_LD | BPF_W | BPF_LEN: case BPF_LDX | BPF_W | BPF_LEN: *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, BPF_REG_CTX, offsetof(struct sk_buff, len)); break; /* Access seccomp_data fields. */ case BPF_LDX | BPF_ABS | BPF_W: /* A = *(u32 *) (ctx + K) */ *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); break; /* Unknown instruction. */ default: goto err; } insn++; if (new_prog) memcpy(new_insn, tmp_insns, sizeof(*insn) * (insn - tmp_insns)); new_insn += insn - tmp_insns; } if (!new_prog) { /* Only calculating new length. */ *new_len = new_insn - first_insn; if (*seen_ld_abs) *new_len += 4; /* Prologue bits. */ return 0; } pass++; if (new_flen != new_insn - first_insn) { new_flen = new_insn - first_insn; if (pass > 2) goto err; goto do_pass; } kfree(addrs); BUG_ON(*new_len != new_flen); return 0; err: kfree(addrs); return -EINVAL; } /* Security: * * As we dont want to clear mem[] array for each packet going through * __bpf_prog_run(), we check that filter loaded by user never try to read * a cell if not previously written, and we check all branches to be sure * a malicious user doesn't try to abuse us. */ static int check_load_and_stores(const struct sock_filter *filter, int flen) { u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ int pc, ret = 0; BUILD_BUG_ON(BPF_MEMWORDS > 16); masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL); if (!masks) return -ENOMEM; memset(masks, 0xff, flen * sizeof(*masks)); for (pc = 0; pc < flen; pc++) { memvalid &= masks[pc]; switch (filter[pc].code) { case BPF_ST: case BPF_STX: memvalid |= (1 << filter[pc].k); break; case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: if (!(memvalid & (1 << filter[pc].k))) { ret = -EINVAL; goto error; } break; case BPF_JMP | BPF_JA: /* A jump must set masks on target */ masks[pc + 1 + filter[pc].k] &= memvalid; memvalid = ~0; break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: /* A jump must set masks on targets */ masks[pc + 1 + filter[pc].jt] &= memvalid; masks[pc + 1 + filter[pc].jf] &= memvalid; memvalid = ~0; break; } } error: kfree(masks); return ret; } static bool chk_code_allowed(u16 code_to_probe) { static const bool codes[] = { /* 32 bit ALU operations */ [BPF_ALU | BPF_ADD | BPF_K] = true, [BPF_ALU | BPF_ADD | BPF_X] = true, [BPF_ALU | BPF_SUB | BPF_K] = true, [BPF_ALU | BPF_SUB | BPF_X] = true, [BPF_ALU | BPF_MUL | BPF_K] = true, [BPF_ALU | BPF_MUL | BPF_X] = true, [BPF_ALU | BPF_DIV | BPF_K] = true, [BPF_ALU | BPF_DIV | BPF_X] = true, [BPF_ALU | BPF_MOD | BPF_K] = true, [BPF_ALU | BPF_MOD | BPF_X] = true, [BPF_ALU | BPF_AND | BPF_K] = true, [BPF_ALU | BPF_AND | BPF_X] = true, [BPF_ALU | BPF_OR | BPF_K] = true, [BPF_ALU | BPF_OR | BPF_X] = true, [BPF_ALU | BPF_XOR | BPF_K] = true, [BPF_ALU | BPF_XOR | BPF_X] = true, [BPF_ALU | BPF_LSH | BPF_K] = true, [BPF_ALU | BPF_LSH | BPF_X] = true, [BPF_ALU | BPF_RSH | BPF_K] = true, [BPF_ALU | BPF_RSH | BPF_X] = true, [BPF_ALU | BPF_NEG] = true, /* Load instructions */ [BPF_LD | BPF_W | BPF_ABS] = true, [BPF_LD | BPF_H | BPF_ABS] = true, [BPF_LD | BPF_B | BPF_ABS] 
= true, [BPF_LD | BPF_W | BPF_LEN] = true, [BPF_LD | BPF_W | BPF_IND] = true, [BPF_LD | BPF_H | BPF_IND] = true, [BPF_LD | BPF_B | BPF_IND] = true, [BPF_LD | BPF_IMM] = true, [BPF_LD | BPF_MEM] = true, [BPF_LDX | BPF_W | BPF_LEN] = true, [BPF_LDX | BPF_B | BPF_MSH] = true, [BPF_LDX | BPF_IMM] = true, [BPF_LDX | BPF_MEM] = true, /* Store instructions */ [BPF_ST] = true, [BPF_STX] = true, /* Misc instructions */ [BPF_MISC | BPF_TAX] = true, [BPF_MISC | BPF_TXA] = true, /* Return instructions */ [BPF_RET | BPF_K] = true, [BPF_RET | BPF_A] = true, /* Jump instructions */ [BPF_JMP | BPF_JA] = true, [BPF_JMP | BPF_JEQ | BPF_K] = true, [BPF_JMP | BPF_JEQ | BPF_X] = true, [BPF_JMP | BPF_JGE | BPF_K] = true, [BPF_JMP | BPF_JGE | BPF_X] = true, [BPF_JMP | BPF_JGT | BPF_K] = true, [BPF_JMP | BPF_JGT | BPF_X] = true, [BPF_JMP | BPF_JSET | BPF_K] = true, [BPF_JMP | BPF_JSET | BPF_X] = true, }; if (code_to_probe >= ARRAY_SIZE(codes)) return false; return codes[code_to_probe]; } static bool bpf_check_basics_ok(const struct sock_filter *filter, unsigned int flen) { if (filter == NULL) return false; if (flen == 0 || flen > BPF_MAXINSNS) return false; return true; } /** * bpf_check_classic - verify socket filter code * @filter: filter to verify * @flen: length of filter * * Check the user's filter code. If we let some ugly * filter code slip through kaboom! The filter must contain * no references or jumps that are out of range, no illegal * instructions, and must end with a RET instruction. * * All jumps are forward as they are not signed. * * Returns 0 if the rule set is legal or -EINVAL if not. */ static int bpf_check_classic(const struct sock_filter *filter, unsigned int flen) { bool anc_found; int pc; /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *ftest = &filter[pc]; /* May we actually operate on this code? */ if (!chk_code_allowed(ftest->code)) return -EINVAL; /* Some instructions need special checks */ switch (ftest->code) { case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU | BPF_MOD | BPF_K: /* Check for division by zero */ if (ftest->k == 0) return -EINVAL; break; case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_K: if (ftest->k >= 32) return -EINVAL; break; case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: case BPF_ST: case BPF_STX: /* Check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; break; case BPF_JMP | BPF_JA: /* Note, the large ftest->k might cause loops. * Compare this with conditional jumps below, * where offsets are limited. 
--ANK (981016) */ if (ftest->k >= (unsigned int)(flen - pc - 1)) return -EINVAL; break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: /* Both conditionals must be safe */ if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; break; case BPF_LD | BPF_W | BPF_ABS: case BPF_LD | BPF_H | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS: anc_found = false; if (bpf_anc_helper(ftest) & BPF_ANC) anc_found = true; /* Ancillary operation unknown or unsupported */ if (anc_found == false && ftest->k >= SKF_AD_OFF) return -EINVAL; } } /* Last instruction must be a RET code */ switch (filter[flen - 1].code) { case BPF_RET | BPF_K: case BPF_RET | BPF_A: return check_load_and_stores(filter, flen); } return -EINVAL; } static int bpf_prog_store_orig_filter(struct bpf_prog *fp, const struct sock_fprog *fprog) { unsigned int fsize = bpf_classic_proglen(fprog); struct sock_fprog_kern *fkprog; fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL); if (!fp->orig_prog) return -ENOMEM; fkprog = fp->orig_prog; fkprog->len = fprog->len; fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL | __GFP_NOWARN); if (!fkprog->filter) { kfree(fp->orig_prog); return -ENOMEM; } return 0; } static void bpf_release_orig_filter(struct bpf_prog *fp) { struct sock_fprog_kern *fprog = fp->orig_prog; if (fprog) { kfree(fprog->filter); kfree(fprog); } } static void __bpf_prog_release(struct bpf_prog *prog) { if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) { bpf_prog_put(prog); } else { bpf_release_orig_filter(prog); bpf_prog_free(prog); } } static void __sk_filter_release(struct sk_filter *fp) { __bpf_prog_release(fp->prog); kfree(fp); } /** * sk_filter_release_rcu - Release a socket filter by rcu_head * @rcu: rcu_head that contains the sk_filter to free */ static void sk_filter_release_rcu(struct rcu_head *rcu) { struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); __sk_filter_release(fp); } /** * sk_filter_release - release a socket filter * @fp: filter to remove * * Remove a filter from a socket and release its resources. 
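 *
 * The final reference drop defers the actual free through call_rcu(), so
 * that readers still running the program under rcu_read_lock() (see
 * sk_filter_trim_cap()) do not have it freed out from under them.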
*/ static void sk_filter_release(struct sk_filter *fp) { if (refcount_dec_and_test(&fp->refcnt)) call_rcu(&fp->rcu, sk_filter_release_rcu); } void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) { u32 filter_size = bpf_prog_size(fp->prog->len); atomic_sub(filter_size, &sk->sk_omem_alloc); sk_filter_release(fp); } /* try to charge the socket memory if there is space available * return true on success */ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) { int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); u32 filter_size = bpf_prog_size(fp->prog->len); /* same check as in sock_kmalloc() */ if (filter_size <= optmem_max && atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) { atomic_add(filter_size, &sk->sk_omem_alloc); return true; } return false; } bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) { if (!refcount_inc_not_zero(&fp->refcnt)) return false; if (!__sk_filter_charge(sk, fp)) { sk_filter_release(fp); return false; } return true; } static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) { struct sock_filter *old_prog; struct bpf_prog *old_fp; int err, new_len, old_len = fp->len; bool seen_ld_abs = false; /* We are free to overwrite insns et al right here as it won't be used at * this point in time anymore internally after the migration to the eBPF * instruction representation. */ BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct bpf_insn)); /* Conversion cannot happen on overlapping memory areas, * so we need to keep the user BPF around until the 2nd * pass. At this time, the user BPF is stored in fp->insns. */ old_prog = kmemdup_array(fp->insns, old_len, sizeof(struct sock_filter), GFP_KERNEL | __GFP_NOWARN); if (!old_prog) { err = -ENOMEM; goto out_err; } /* 1st pass: calculate the new program length. */ err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs); if (err) goto out_err_free; /* Expand fp for appending the new filter representation. */ old_fp = fp; fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); if (!fp) { /* The old_fp is still around in case we couldn't * allocate new memory, so uncharge on that one. */ fp = old_fp; err = -ENOMEM; goto out_err_free; } fp->len = new_len; /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ err = bpf_convert_filter(old_prog, old_len, fp, &new_len, &seen_ld_abs); if (err) /* 2nd bpf_convert_filter() can fail only if it fails * to allocate memory, remapping must succeed. Note, * that at this time old_fp has already been released * by krealloc(). */ goto out_err_free; fp = bpf_prog_select_runtime(fp, &err); if (err) goto out_err_free; kfree(old_prog); return fp; out_err_free: kfree(old_prog); out_err: __bpf_prog_release(fp); return ERR_PTR(err); } static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, bpf_aux_classic_check_t trans) { int err; fp->bpf_func = NULL; fp->jited = 0; err = bpf_check_classic(fp->insns, fp->len); if (err) { __bpf_prog_release(fp); return ERR_PTR(err); } /* There might be additional checks and transformations * needed on classic filters, f.e. in case of seccomp. */ if (trans) { err = trans(fp->insns, fp->len); if (err) { __bpf_prog_release(fp); return ERR_PTR(err); } } /* Probe if we can JIT compile the filter and if so, do * the compilation of the filter. */ bpf_jit_compile(fp); /* JIT compiler couldn't process this filter, so do the eBPF translation * for the optimized interpreter. 
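	 *
	 * In other words: bpf_jit_compile() above is the classic-BPF JIT;
	 * if it could not handle the program (fp->jited is still 0),
	 * bpf_migrate_filter() below converts it to eBPF and lets
	 * bpf_prog_select_runtime() pick the eBPF interpreter or eBPF JIT.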
*/ if (!fp->jited) fp = bpf_migrate_filter(fp); return fp; } /** * bpf_prog_create - create an unattached filter * @pfp: the unattached filter that is created * @fprog: the filter program * * Create a filter independent of any socket. We first run some * sanity checks on it to make sure it does not explode on us later. * If an error occurs or there is insufficient memory for the filter * a negative errno code is returned. On success the return is zero. */ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *fp; /* Make sure new filter is there and in the right amounts. */ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL; fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!fp) return -ENOMEM; memcpy(fp->insns, fprog->filter, fsize); fp->len = fprog->len; /* Since unattached filters are not copied back to user * space through sk_get_filter(), we do not need to hold * a copy here, and can spare us the work. */ fp->orig_prog = NULL; /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. */ fp = bpf_prepare_filter(fp, NULL); if (IS_ERR(fp)) return PTR_ERR(fp); *pfp = fp; return 0; } EXPORT_SYMBOL_GPL(bpf_prog_create); /** * bpf_prog_create_from_user - create an unattached filter from user buffer * @pfp: the unattached filter that is created * @fprog: the filter program * @trans: post-classic verifier transformation handler * @save_orig: save classic BPF program * * This function effectively does the same as bpf_prog_create(), only * that it builds up its insns buffer from user space provided buffer. * It also allows for passing a bpf_aux_classic_check_t handler. */ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *fp; int err; /* Make sure new filter is there and in the right amounts. */ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL; fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!fp) return -ENOMEM; if (copy_from_user(fp->insns, fprog->filter, fsize)) { __bpf_prog_free(fp); return -EFAULT; } fp->len = fprog->len; fp->orig_prog = NULL; if (save_orig) { err = bpf_prog_store_orig_filter(fp, fprog); if (err) { __bpf_prog_free(fp); return -ENOMEM; } } /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. */ fp = bpf_prepare_filter(fp, trans); if (IS_ERR(fp)) return PTR_ERR(fp); *pfp = fp; return 0; } EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); void bpf_prog_destroy(struct bpf_prog *fp) { __bpf_prog_release(fp); } EXPORT_SYMBOL_GPL(bpf_prog_destroy); static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) { struct sk_filter *fp, *old_fp; fp = kmalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; fp->prog = prog; if (!__sk_filter_charge(sk, fp)) { kfree(fp); return -ENOMEM; } refcount_set(&fp->refcnt, 1); old_fp = rcu_dereference_protected(sk->sk_filter, lockdep_sock_is_held(sk)); rcu_assign_pointer(sk->sk_filter, fp); if (old_fp) sk_filter_uncharge(sk, old_fp); return 0; } static struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *prog; int err; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return ERR_PTR(-EPERM); /* Make sure new filter is there and in the right amounts. 
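	 *
	 * Note that bpf_check_basics_ok() only rejects a NULL filter or a
	 * length of 0 / above BPF_MAXINSNS; the per-instruction validation
	 * happens later in bpf_prepare_filter() via bpf_check_classic().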
*/ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return ERR_PTR(-EINVAL); prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!prog) return ERR_PTR(-ENOMEM); if (copy_from_user(prog->insns, fprog->filter, fsize)) { __bpf_prog_free(prog); return ERR_PTR(-EFAULT); } prog->len = fprog->len; err = bpf_prog_store_orig_filter(prog, fprog); if (err) { __bpf_prog_free(prog); return ERR_PTR(-ENOMEM); } /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. */ return bpf_prepare_filter(prog, NULL); } /** * sk_attach_filter - attach a socket filter * @fprog: the filter program * @sk: the socket to use * * Attach the user's filter code. We first run some sanity checks on * it to make sure it does not explode on us later. If an error * occurs or there is insufficient memory for the filter a negative * errno code is returned. On success the return is zero. */ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) { struct bpf_prog *prog = __get_filter(fprog, sk); int err; if (IS_ERR(prog)) return PTR_ERR(prog); err = __sk_attach_prog(prog, sk); if (err < 0) { __bpf_prog_release(prog); return err; } return 0; } EXPORT_SYMBOL_GPL(sk_attach_filter); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) { struct bpf_prog *prog = __get_filter(fprog, sk); int err, optmem_max; if (IS_ERR(prog)) return PTR_ERR(prog); optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); if (bpf_prog_size(prog->len) > optmem_max) err = -ENOMEM; else err = reuseport_attach_prog(sk, prog); if (err) __bpf_prog_release(prog); return err; } static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) { if (sock_flag(sk, SOCK_FILTER_LOCKED)) return ERR_PTR(-EPERM); return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); } int sk_attach_bpf(u32 ufd, struct sock *sk) { struct bpf_prog *prog = __get_bpf(ufd, sk); int err; if (IS_ERR(prog)) return PTR_ERR(prog); err = __sk_attach_prog(prog, sk); if (err < 0) { bpf_prog_put(prog); return err; } return 0; } int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) { struct bpf_prog *prog; int err, optmem_max; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return -EPERM; prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); if (PTR_ERR(prog) == -EINVAL) prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); if (IS_ERR(prog)) return PTR_ERR(prog); if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { /* Like other non BPF_PROG_TYPE_SOCKET_FILTER * bpf prog (e.g. sockmap). It depends on the * limitation imposed by bpf_prog_load(). * Hence, sysctl_optmem_max is not checked. 
*/ if ((sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_DGRAM) || (sk->sk_protocol != IPPROTO_UDP && sk->sk_protocol != IPPROTO_TCP) || (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { err = -ENOTSUPP; goto err_prog_put; } } else { /* BPF_PROG_TYPE_SOCKET_FILTER */ optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); if (bpf_prog_size(prog->len) > optmem_max) { err = -ENOMEM; goto err_prog_put; } } err = reuseport_attach_prog(sk, prog); err_prog_put: if (err) bpf_prog_put(prog); return err; } void sk_reuseport_prog_free(struct bpf_prog *prog) { if (!prog) return; if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) bpf_prog_put(prog); else bpf_prog_destroy(prog); } static inline int __bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) { #ifdef CONFIG_DEBUG_NET /* Avoid a splat in pskb_may_pull_reason() */ if (write_len > INT_MAX) return -EINVAL; #endif return skb_ensure_writable(skb, write_len); } static inline int bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) { int err = __bpf_try_make_writable(skb, write_len); bpf_compute_data_pointers(skb); return err; } static int bpf_try_make_head_writable(struct sk_buff *skb) { return bpf_try_make_writable(skb, skb_headlen(skb)); } static inline void bpf_push_mac_rcsum(struct sk_buff *skb) { if (skb_at_tc_ingress(skb)) skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); } static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) { if (skb_at_tc_ingress(skb)) skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); } BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, const void *, from, u32, len, u64, flags) { void *ptr; if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) return -EINVAL; if (unlikely(offset > INT_MAX)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; ptr = skb->data + offset; if (flags & BPF_F_RECOMPUTE_CSUM) __skb_postpull_rcsum(skb, ptr, len, offset); memcpy(ptr, from, len); if (flags & BPF_F_RECOMPUTE_CSUM) __skb_postpush_rcsum(skb, ptr, len, offset); if (flags & BPF_F_INVALIDATE_HASH) skb_clear_hash(skb); return 0; } static const struct bpf_func_proto bpf_skb_store_bytes_proto = { .func = bpf_skb_store_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) { return ____bpf_skb_store_bytes(skb, offset, from, len, flags); } BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, void *, to, u32, len) { void *ptr; if (unlikely(offset > INT_MAX)) goto err_clear; ptr = skb_header_pointer(skb, offset, len, to); if (unlikely(!ptr)) goto err_clear; if (ptr != to) memcpy(to, ptr, len); return 0; err_clear: memset(to, 0, len); return -EFAULT; } static const struct bpf_func_proto bpf_skb_load_bytes_proto = { .func = bpf_skb_load_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) { return ____bpf_skb_load_bytes(skb, offset, to, len); } BPF_CALL_4(bpf_flow_dissector_load_bytes, const struct bpf_flow_dissector *, ctx, u32, offset, void *, to, u32, len) { void *ptr; if (unlikely(offset > 0xffff)) goto err_clear; if (unlikely(!ctx->skb)) goto 
err_clear; ptr = skb_header_pointer(ctx->skb, offset, len, to); if (unlikely(!ptr)) goto err_clear; if (ptr != to) memcpy(to, ptr, len); return 0; err_clear: memset(to, 0, len); return -EFAULT; } static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = { .func = bpf_flow_dissector_load_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, u32, offset, void *, to, u32, len, u32, start_header) { u8 *end = skb_tail_pointer(skb); u8 *start, *ptr; if (unlikely(offset > 0xffff)) goto err_clear; switch (start_header) { case BPF_HDR_START_MAC: if (unlikely(!skb_mac_header_was_set(skb))) goto err_clear; start = skb_mac_header(skb); break; case BPF_HDR_START_NET: start = skb_network_header(skb); break; default: goto err_clear; } ptr = start + offset; if (likely(ptr + len <= end)) { memcpy(to, ptr, len); return 0; } err_clear: memset(to, 0, len); return -EFAULT; } static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { .func = bpf_skb_load_bytes_relative, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) { /* Idea is the following: should the needed direct read/write * test fail during runtime, we can pull in more data and redo * again, since implicitly, we invalidate previous checks here. * * Or, since we know how much we need to make read/writeable, * this can be done once at the program beginning for direct * access case. By this we overcome limitations of only current * headroom being accessible. */ return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); } static const struct bpf_func_proto bpf_skb_pull_data_proto = { .func = bpf_skb_pull_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) { return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL; } static const struct bpf_func_proto bpf_sk_fullsock_proto = { .func = bpf_sk_fullsock, .gpl_only = false, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_SOCK_COMMON, }; static inline int sk_skb_try_make_writable(struct sk_buff *skb, unsigned int write_len) { return __bpf_try_make_writable(skb, write_len); } BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) { /* Idea is the following: should the needed direct read/write * test fail during runtime, we can pull in more data and redo * again, since implicitly, we invalidate previous checks here. * * Or, since we know how much we need to make read/writeable, * this can be done once at the program beginning for direct * access case. By this we overcome limitations of only current * headroom being accessible. */ return sk_skb_try_make_writable(skb, len ? 
: skb_headlen(skb)); } static const struct bpf_func_proto sk_skb_pull_data_proto = { .func = sk_skb_pull_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, u64, from, u64, to, u64, flags) { __sum16 *ptr; if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) return -EFAULT; ptr = (__sum16 *)(skb->data + offset); switch (flags & BPF_F_HDR_FIELD_MASK) { case 0: if (unlikely(from != 0)) return -EINVAL; csum_replace_by_diff(ptr, to); break; case 2: csum_replace2(ptr, from, to); break; case 4: csum_replace4(ptr, from, to); break; default: return -EINVAL; } return 0; } static const struct bpf_func_proto bpf_l3_csum_replace_proto = { .func = bpf_l3_csum_replace, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, u64, from, u64, to, u64, flags) { bool is_pseudo = flags & BPF_F_PSEUDO_HDR; bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; bool do_mforce = flags & BPF_F_MARK_ENFORCE; bool is_ipv6 = flags & BPF_F_IPV6; __sum16 *ptr; if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK | BPF_F_IPV6))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) return -EFAULT; ptr = (__sum16 *)(skb->data + offset); if (is_mmzero && !do_mforce && !*ptr) return 0; switch (flags & BPF_F_HDR_FIELD_MASK) { case 0: if (unlikely(from != 0)) return -EINVAL; inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo, is_ipv6); break; case 2: inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); break; case 4: inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); break; default: return -EINVAL; } if (is_mmzero && !*ptr) *ptr = CSUM_MANGLED_0; return 0; } static const struct bpf_func_proto bpf_l4_csum_replace_proto = { .func = bpf_l4_csum_replace, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, __be32 *, to, u32, to_size, __wsum, seed) { /* This is quite flexible, some examples: * * from_size == 0, to_size > 0, seed := csum --> pushing data * from_size > 0, to_size == 0, seed := csum --> pulling data * from_size > 0, to_size > 0, seed := 0 --> diffing data * * Even for diffing, from_size and to_size don't need to be equal. 
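	 *
	 * Illustrative use from a BPF program (an example, not taken from
	 * this file): when rewriting a 4-byte IPv4 address, a program can
	 * compute the delta as
	 *
	 *   diff = bpf_csum_diff(&old_addr, 4, &new_addr, 4, 0);
	 *
	 * and then fold it into an L4 checksum via bpf_l4_csum_replace()
	 * with from == 0 and a zero field size, which takes the case 0
	 * (replace-by-diff) path in bpf_l4_csum_replace() above.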
*/ __wsum ret = seed; if (from_size && to_size) ret = csum_sub(csum_partial(to, to_size, ret), csum_partial(from, from_size, 0)); else if (to_size) ret = csum_partial(to, to_size, ret); else if (from_size) ret = ~csum_partial(from, from_size, ~ret); return csum_from32to16((__force unsigned int)ret); } static const struct bpf_func_proto bpf_csum_diff_proto = { .func = bpf_csum_diff, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) { /* The interface is to be used in combination with bpf_csum_diff() * for direct packet writes. csum rotation for alignment as well * as emulating csum_sub() can be done from the eBPF program. */ if (skb->ip_summed == CHECKSUM_COMPLETE) return (skb->csum = csum_add(skb->csum, csum)); return -ENOTSUPP; } static const struct bpf_func_proto bpf_csum_update_proto = { .func = bpf_csum_update, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level) { /* The interface is to be used in combination with bpf_skb_adjust_room() * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET * is passed as flags, for example. */ switch (level) { case BPF_CSUM_LEVEL_INC: __skb_incr_checksum_unnecessary(skb); break; case BPF_CSUM_LEVEL_DEC: __skb_decr_checksum_unnecessary(skb); break; case BPF_CSUM_LEVEL_RESET: __skb_reset_checksum_unnecessary(skb); break; case BPF_CSUM_LEVEL_QUERY: return skb->ip_summed == CHECKSUM_UNNECESSARY ? skb->csum_level : -EACCES; default: return -EINVAL; } return 0; } static const struct bpf_func_proto bpf_csum_level_proto = { .func = bpf_csum_level, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) { return dev_forward_skb_nomtu(dev, skb); } static inline int __bpf_rx_skb_no_mac(struct net_device *dev, struct sk_buff *skb) { int ret = ____dev_forward_skb(dev, skb, false); if (likely(!ret)) { skb->dev = dev; ret = netif_rx(skb); } return ret; } static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) { int ret; if (dev_xmit_recursion()) { net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); kfree_skb(skb); return -ENETDOWN; } skb->dev = dev; skb_set_redirected_noclear(skb, skb_at_tc_ingress(skb)); skb_clear_tstamp(skb); dev_xmit_recursion_inc(); ret = dev_queue_xmit(skb); dev_xmit_recursion_dec(); return ret; } static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, u32 flags) { unsigned int mlen = skb_network_offset(skb); if (unlikely(skb->len <= mlen)) { kfree_skb(skb); return -ERANGE; } if (mlen) { __skb_pull(skb, mlen); /* At ingress, the mac header has already been pulled once. * At egress, skb_pospull_rcsum has to be done in case that * the skb is originated from ingress (i.e. a forwarded skb) * to ensure that rcsum starts at net header. */ if (!skb_at_tc_ingress(skb)) skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); } skb_pop_mac_header(skb); skb_reset_mac_len(skb); return flags & BPF_F_INGRESS ? 
__bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); } static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, u32 flags) { /* Verify that a link layer header is carried */ if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) { kfree_skb(skb); return -ERANGE; } bpf_push_mac_rcsum(skb); return flags & BPF_F_INGRESS ? __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); } static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, u32 flags) { if (dev_is_mac_header_xmit(dev)) return __bpf_redirect_common(skb, dev, flags); else return __bpf_redirect_no_mac(skb, dev, flags); } #if IS_ENABLED(CONFIG_IPV6) static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { u32 hh_len = LL_RESERVED_SPACE(dev); const struct in6_addr *nexthop; struct dst_entry *dst = NULL; struct neighbour *neigh; if (dev_xmit_recursion()) { net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); goto out_drop; } skb->dev = dev; skb_clear_tstamp(skb); if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { skb = skb_expand_head(skb, hh_len); if (!skb) return -ENOMEM; } rcu_read_lock(); if (!nh) { dst = skb_dst(skb); nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr); } else { nexthop = &nh->ipv6_nh; } neigh = ip_neigh_gw6(dev, nexthop); if (likely(!IS_ERR(neigh))) { int ret; sock_confirm_neigh(skb, neigh); local_bh_disable(); dev_xmit_recursion_inc(); ret = neigh_output(neigh, skb, false); dev_xmit_recursion_dec(); local_bh_enable(); rcu_read_unlock(); return ret; } rcu_read_unlock(); if (dst) IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); out_drop: kfree_skb(skb); return -ENETDOWN; } static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct net *net = dev_net(dev); int err, ret = NET_XMIT_DROP; if (!nh) { struct dst_entry *dst; struct flowi6 fl6 = { .flowi6_flags = FLOWI_FLAG_ANYSRC, .flowi6_mark = skb->mark, .flowlabel = ip6_flowinfo(ip6h), .flowi6_oif = dev->ifindex, .flowi6_proto = ip6h->nexthdr, .daddr = ip6h->daddr, .saddr = ip6h->saddr, }; dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); if (IS_ERR(dst)) goto out_drop; skb_dst_drop(skb); skb_dst_set(skb, dst); } else if (nh->nh_family != AF_INET6) { goto out_drop; } err = bpf_out_neigh_v6(net, skb, dev, nh); if (unlikely(net_xmit_eval(err))) DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out_xmit; out_drop: DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out_xmit: return ret; } #else static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { kfree_skb(skb); return NET_XMIT_DROP; } #endif /* CONFIG_IPV6 */ #if IS_ENABLED(CONFIG_INET) static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { u32 hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; bool is_v6gw = false; if (dev_xmit_recursion()) { net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); goto out_drop; } skb->dev = dev; skb_clear_tstamp(skb); if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { skb = skb_expand_head(skb, hh_len); if (!skb) return -ENOMEM; } rcu_read_lock(); if (!nh) { struct rtable *rt = skb_rtable(skb); neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); } else if (nh->nh_family == AF_INET6) { neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); 
is_v6gw = true; } else if (nh->nh_family == AF_INET) { neigh = ip_neigh_gw4(dev, nh->ipv4_nh); } else { rcu_read_unlock(); goto out_drop; } if (likely(!IS_ERR(neigh))) { int ret; sock_confirm_neigh(skb, neigh); local_bh_disable(); dev_xmit_recursion_inc(); ret = neigh_output(neigh, skb, is_v6gw); dev_xmit_recursion_dec(); local_bh_enable(); rcu_read_unlock(); return ret; } rcu_read_unlock(); out_drop: kfree_skb(skb); return -ENETDOWN; } static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { const struct iphdr *ip4h = ip_hdr(skb); struct net *net = dev_net(dev); int err, ret = NET_XMIT_DROP; if (!nh) { struct flowi4 fl4 = { .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, .flowi4_dscp = ip4h_dscp(ip4h), .flowi4_oif = dev->ifindex, .flowi4_proto = ip4h->protocol, .daddr = ip4h->daddr, .saddr = ip4h->saddr, }; struct rtable *rt; rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto out_drop; if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { ip_rt_put(rt); goto out_drop; } skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); } err = bpf_out_neigh_v4(net, skb, dev, nh); if (unlikely(net_xmit_eval(err))) DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out_xmit; out_drop: DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out_xmit: return ret; } #else static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { kfree_skb(skb); return NET_XMIT_DROP; } #endif /* CONFIG_INET */ static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev, struct bpf_nh_params *nh) { struct ethhdr *ethh = eth_hdr(skb); if (unlikely(skb->mac_header >= skb->network_header)) goto out; bpf_push_mac_rcsum(skb); if (is_multicast_ether_addr(ethh->h_dest)) goto out; skb_pull(skb, sizeof(*ethh)); skb_unset_mac_header(skb); skb_reset_network_header(skb); if (skb->protocol == htons(ETH_P_IP)) return __bpf_redirect_neigh_v4(skb, dev, nh); else if (skb->protocol == htons(ETH_P_IPV6)) return __bpf_redirect_neigh_v6(skb, dev, nh); out: kfree_skb(skb); return -ENOTSUPP; } /* Internal, non-exposed redirect flags. */ enum { BPF_F_NEIGH = (1ULL << 16), BPF_F_PEER = (1ULL << 17), BPF_F_NEXTHOP = (1ULL << 18), #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) }; BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) { struct net_device *dev; struct sk_buff *clone; int ret; BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS); if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) return -EINVAL; /* BPF test infra's convert___skb_to_skb() can create type-less * GSO packets. gso_features_check() will detect this as a bad * offload. However, lets not leak them out in the first place. */ if (unlikely(skb_is_gso(skb) && !skb_shinfo(skb)->gso_type)) return -EBADMSG; dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); if (unlikely(!dev)) return -EINVAL; clone = skb_clone(skb, GFP_ATOMIC); if (unlikely(!clone)) return -ENOMEM; /* For direct write, we need to keep the invariant that the skbs * we're dealing with need to be uncloned. Should uncloning fail * here, we need to free the just generated clone to unclone once * again. 
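	 *
	 * Put differently: skb_clone() above left the original skb sharing
	 * its head with the clone; bpf_try_make_head_writable() below copies
	 * the head so that the program's later direct writes stay off the
	 * clone handed to __bpf_redirect(). If that copy fails, the clone is
	 * freed again.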
*/ ret = bpf_try_make_head_writable(skb); if (unlikely(ret)) { kfree_skb(clone); return -ENOMEM; } return __bpf_redirect(clone, dev, flags); } static const struct bpf_func_proto bpf_clone_redirect_proto = { .func = bpf_clone_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static struct net_device *skb_get_peer_dev(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; if (likely(ops->ndo_get_peer_dev)) return INDIRECT_CALL_1(ops->ndo_get_peer_dev, netkit_peer_dev, dev); return NULL; } int skb_do_redirect(struct sk_buff *skb) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); struct net *net = dev_net(skb->dev); struct net_device *dev; u32 flags = ri->flags; dev = dev_get_by_index_rcu(net, ri->tgt_index); ri->tgt_index = 0; ri->flags = 0; if (unlikely(!dev)) goto out_drop; if (flags & BPF_F_PEER) { if (unlikely(!skb_at_tc_ingress(skb))) goto out_drop; dev = skb_get_peer_dev(dev); if (unlikely(!dev || !(dev->flags & IFF_UP) || net_eq(net, dev_net(dev)))) goto out_drop; skb->dev = dev; dev_sw_netstats_rx_add(dev, skb->len); skb_scrub_packet(skb, false); return -EAGAIN; } return flags & BPF_F_NEIGH ? __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ? &ri->nh : NULL) : __bpf_redirect(skb, dev, flags); out_drop: kfree_skb(skb); return -EINVAL; } BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) return TC_ACT_SHOT; ri->flags = flags; ri->tgt_index = ifindex; return TC_ACT_REDIRECT; } static const struct bpf_func_proto bpf_redirect_proto = { .func = bpf_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely(flags)) return TC_ACT_SHOT; ri->flags = BPF_F_PEER; ri->tgt_index = ifindex; return TC_ACT_REDIRECT; } static const struct bpf_func_proto bpf_redirect_peer_proto = { .func = bpf_redirect_peer, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, int, plen, u64, flags) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely((plen && plen < sizeof(*params)) || flags)) return TC_ACT_SHOT; ri->flags = BPF_F_NEIGH | (plen ? 
BPF_F_NEXTHOP : 0); ri->tgt_index = ifindex; BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params)); if (plen) memcpy(&ri->nh, params, sizeof(ri->nh)); return TC_ACT_REDIRECT; } static const struct bpf_func_proto bpf_redirect_neigh_proto = { .func = bpf_redirect_neigh, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) { msg->apply_bytes = bytes; return 0; } static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { .func = bpf_msg_apply_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) { msg->cork_bytes = bytes; return 0; } static void sk_msg_reset_curr(struct sk_msg *msg) { if (!msg->sg.size) { msg->sg.curr = msg->sg.start; msg->sg.copybreak = 0; } else { u32 i = msg->sg.end; sk_msg_iter_var_prev(i); msg->sg.curr = i; msg->sg.copybreak = msg->sg.data[i].length; } } static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { .func = bpf_msg_cork_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, u32, end, u64, flags) { u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; u32 first_sge, last_sge, i, shift, bytes_sg_total; struct scatterlist *sge; u8 *raw, *to, *from; struct page *page; if (unlikely(flags || end <= start)) return -EINVAL; /* First find the starting scatterlist element */ i = msg->sg.start; do { offset += len; len = sk_msg_elem(msg, i)->length; if (start < offset + len) break; sk_msg_iter_var_next(i); } while (i != msg->sg.end); if (unlikely(start >= offset + len)) return -EINVAL; first_sge = i; /* The start may point into the sg element so we need to also * account for the headroom. */ bytes_sg_total = start - offset + bytes; if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len) goto out; /* At this point we need to linearize multiple scatterlist * elements or a single shared page. Either way we need to * copy into a linear buffer exclusively owned by BPF. Then * place the buffer in the scatterlist and fixup the original * entries by removing the entries now in the linear buffer * and shifting the remaining entries. For now we do not try * to copy partial entries to avoid complexity of running out * of sg_entry slots. The downside is reading a single byte * will copy the entire sg entry. */ do { copy += sk_msg_elem(msg, i)->length; sk_msg_iter_var_next(i); if (bytes_sg_total <= copy) break; } while (i != msg->sg.end); last_sge = i; if (unlikely(bytes_sg_total > copy)) return -EINVAL; page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, get_order(copy)); if (unlikely(!page)) return -ENOMEM; raw = page_address(page); i = first_sge; do { sge = sk_msg_elem(msg, i); from = sg_virt(sge); len = sge->length; to = raw + poffset; memcpy(to, from, len); poffset += len; sge->length = 0; put_page(sg_page(sge)); sk_msg_iter_var_next(i); } while (i != last_sge); sg_set_page(&msg->sg.data[first_sge], page, copy, 0); /* To repair sg ring we need to shift entries. If we only * had a single entry though we can just replace it and * be done. Otherwise walk the ring and shift the entries. */ WARN_ON_ONCE(last_sge == first_sge); shift = last_sge > first_sge ? 
last_sge - first_sge - 1 : NR_MSG_FRAG_IDS - first_sge + last_sge - 1; if (!shift) goto out; i = first_sge; sk_msg_iter_var_next(i); do { u32 move_from; if (i + shift >= NR_MSG_FRAG_IDS) move_from = i + shift - NR_MSG_FRAG_IDS; else move_from = i + shift; if (move_from == msg->sg.end) break; msg->sg.data[i] = msg->sg.data[move_from]; msg->sg.data[move_from].length = 0; msg->sg.data[move_from].page_link = 0; msg->sg.data[move_from].offset = 0; sk_msg_iter_var_next(i); } while (1); msg->sg.end = msg->sg.end - shift > msg->sg.end ? msg->sg.end - shift + NR_MSG_FRAG_IDS : msg->sg.end - shift; out: sk_msg_reset_curr(msg); msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; msg->data_end = msg->data + bytes; return 0; } static const struct bpf_func_proto bpf_msg_pull_data_proto = { .func = bpf_msg_pull_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; u32 new, i = 0, l = 0, space, copy = 0, offset = 0; u8 *raw, *to, *from; struct page *page; if (unlikely(flags)) return -EINVAL; if (unlikely(len == 0)) return 0; /* First find the starting scatterlist element */ i = msg->sg.start; do { offset += l; l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; sk_msg_iter_var_next(i); } while (i != msg->sg.end); if (start > offset + l) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); /* If no space available will fallback to copy, we need at * least one scatterlist elem available to push data into * when start aligns to the beginning of an element or two * when it falls inside an element. We handle the start equals * offset case because its the common case for inserting a * header. 
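 *
 * For reference, the usual caller is an SK_MSG program reserving room
 * for an application level header in front of the payload. A minimal
 * sketch of such a program (sizes illustrative, not taken from this
 * file):
 *
 *   SEC("sk_msg")
 *   int add_hdr(struct sk_msg_md *msg)
 *   {
 *           if (bpf_msg_push_data(msg, 0, 8, 0))
 *                   return SK_DROP;
 *           return SK_PASS;
 *   }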
*/ if (!space || (space == 1 && start != offset)) copy = msg->sg.data[i].length; page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, get_order(copy + len)); if (unlikely(!page)) return -ENOMEM; if (copy) { int front, back; raw = page_address(page); if (i == msg->sg.end) sk_msg_iter_var_prev(i); psge = sk_msg_elem(msg, i); front = start - offset; back = psge->length - front; from = sg_virt(psge); if (front) memcpy(raw, from, front); if (back) { from += front; to = raw + front + len; memcpy(to, from, back); } put_page(sg_page(psge)); new = i; goto place_new; } if (start - offset) { if (i == msg->sg.end) sk_msg_iter_var_prev(i); psge = sk_msg_elem(msg, i); rsge = sk_msg_elem_cpy(msg, i); psge->length = start - offset; rsge.length -= psge->length; rsge.offset += start; sk_msg_iter_var_next(i); sg_unmark_end(psge); sg_unmark_end(&rsge); } /* Slot(s) to place newly allocated data */ sk_msg_iter_next(msg, end); new = i; sk_msg_iter_var_next(i); if (i == msg->sg.end) { if (!rsge.length) goto place_new; sk_msg_iter_next(msg, end); goto place_new; } /* Shift one or two slots as needed */ sge = sk_msg_elem_cpy(msg, new); sg_unmark_end(&sge); nsge = sk_msg_elem_cpy(msg, i); if (rsge.length) { sk_msg_iter_var_next(i); nnsge = sk_msg_elem_cpy(msg, i); sk_msg_iter_next(msg, end); } while (i != msg->sg.end) { msg->sg.data[i] = sge; sge = nsge; sk_msg_iter_var_next(i); if (rsge.length) { nsge = nnsge; nnsge = sk_msg_elem_cpy(msg, i); } else { nsge = sk_msg_elem_cpy(msg, i); } } place_new: /* Place newly allocated data buffer */ sk_mem_charge(msg->sk, len); msg->sg.size += len; __clear_bit(new, msg->sg.copy); sg_set_page(&msg->sg.data[new], page, len + copy, 0); if (rsge.length) { get_page(sg_page(&rsge)); sk_msg_iter_var_next(new); msg->sg.data[new] = rsge; } sk_msg_reset_curr(msg); sk_msg_compute_data_pointers(msg); return 0; } static const struct bpf_func_proto bpf_msg_push_data_proto = { .func = bpf_msg_push_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; static void sk_msg_shift_left(struct sk_msg *msg, int i) { struct scatterlist *sge = sk_msg_elem(msg, i); int prev; put_page(sg_page(sge)); do { prev = i; sk_msg_iter_var_next(i); msg->sg.data[prev] = msg->sg.data[i]; } while (i != msg->sg.end); sk_msg_iter_prev(msg, end); } static void sk_msg_shift_right(struct sk_msg *msg, int i) { struct scatterlist tmp, sge; sk_msg_iter_next(msg, end); sge = sk_msg_elem_cpy(msg, i); sk_msg_iter_var_next(i); tmp = sk_msg_elem_cpy(msg, i); while (i != msg->sg.end) { msg->sg.data[i] = sge; sk_msg_iter_var_next(i); sge = tmp; tmp = sk_msg_elem_cpy(msg, i); } } BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { u32 i = 0, l = 0, space, offset = 0; u64 last = start + len; int pop; if (unlikely(flags)) return -EINVAL; if (unlikely(len == 0)) return 0; /* First find the starting scatterlist element */ i = msg->sg.start; do { offset += l; l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; sk_msg_iter_var_next(i); } while (i != msg->sg.end); /* Bounds checks: start and pop must be inside message */ if (start >= offset + l || last > msg->sg.size) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); pop = len; /* --------------| offset * -| start |-------- len -------| * * |----- a ----|-------- pop -------|----- b ----| * |______________________________________________| length * * * a: region at front of scatter element to save * b: region at back 
of scatter element to save when length > A + pop * pop: region to pop from element, same as input 'pop' here will be * decremented below per iteration. * * Two top-level cases to handle when start != offset, first B is non * zero and second B is zero corresponding to when a pop includes more * than one element. * * Then if B is non-zero AND there is no space allocate space and * compact A, B regions into page. If there is space shift ring to * the right free'ing the next element in ring to place B, leaving * A untouched except to reduce length. */ if (start != offset) { struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); int a = start - offset; int b = sge->length - pop - a; sk_msg_iter_var_next(i); if (b > 0) { if (space) { sge->length = a; sk_msg_shift_right(msg, i); nsge = sk_msg_elem(msg, i); get_page(sg_page(sge)); sg_set_page(nsge, sg_page(sge), b, sge->offset + pop + a); } else { struct page *page, *orig; u8 *to, *from; page = alloc_pages(__GFP_NOWARN | __GFP_COMP | GFP_ATOMIC, get_order(a + b)); if (unlikely(!page)) return -ENOMEM; orig = sg_page(sge); from = sg_virt(sge); to = page_address(page); memcpy(to, from, a); memcpy(to + a, from + a + pop, b); sg_set_page(sge, page, a + b, 0); put_page(orig); } pop = 0; } else { pop -= (sge->length - a); sge->length = a; } } /* From above the current layout _must_ be as follows, * * -| offset * -| start * * |---- pop ---|---------------- b ------------| * |____________________________________________| length * * Offset and start of the current msg elem are equal because in the * previous case we handled offset != start and either consumed the * entire element and advanced to the next element OR pop == 0. * * Two cases to handle here are first pop is less than the length * leaving some remainder b above. Simply adjust the element's layout * in this case. Or pop >= length of the element so that b = 0. In this * case advance to next element decrementing pop. 
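 *
 * The BPF side of this is bpf_msg_pop_data(); e.g. an SK_MSG program
 * could strip an 8 byte header it inserted earlier with
 * bpf_msg_pop_data(msg, 0, 8, 0) (sizes illustrative, not from this
 * file).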
*/ while (pop) { struct scatterlist *sge = sk_msg_elem(msg, i); if (pop < sge->length) { sge->length -= pop; sge->offset += pop; pop = 0; } else { pop -= sge->length; sk_msg_shift_left(msg, i); } } sk_mem_uncharge(msg->sk, len - pop); msg->sg.size -= (len - pop); sk_msg_reset_curr(msg); sk_msg_compute_data_pointers(msg); return 0; } static const struct bpf_func_proto bpf_msg_pop_data_proto = { .func = bpf_msg_pop_data, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; #ifdef CONFIG_CGROUP_NET_CLASSID BPF_CALL_0(bpf_get_cgroup_classid_curr) { return __task_get_classid(current); } const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { .func = bpf_get_cgroup_classid_curr, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb) { struct sock *sk = skb_to_full_sk(skb); if (!sk || !sk_fullsock(sk)) return 0; return sock_cgroup_classid(&sk->sk_cgrp_data); } static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = { .func = bpf_skb_cgroup_classid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; #endif BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) { return task_get_classid(skb); } static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { .func = bpf_get_cgroup_classid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) { return dst_tclassid(skb); } static const struct bpf_func_proto bpf_get_route_realm_proto = { .func = bpf_get_route_realm, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) { /* If skb_clear_hash() was called due to mangling, we can * trigger SW recalculation here. Later access to hash * can then use the inline skb->hash via context directly * instead of calling this helper again. */ return skb_get_hash(skb); } static const struct bpf_func_proto bpf_get_hash_recalc_proto = { .func = bpf_get_hash_recalc, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) { /* After all direct packet write, this can be used once for * triggering a lazy recalc on next skb_get_hash() invocation. */ skb_clear_hash(skb); return 0; } static const struct bpf_func_proto bpf_set_hash_invalid_proto = { .func = bpf_set_hash_invalid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) { /* Set user specified hash as L4(+), so that it gets returned * on skb_get_hash() call unless BPF prog later on triggers a * skb_clear_hash(). 
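 *
 * A typical (illustrative) caller is a tc program that computes a
 * hash over the inner headers of an encapsulated packet and installs
 * it with bpf_set_hash(skb, hash), so that later consumers of
 * skb->hash steer on the inner flow.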
*/ __skb_set_sw_hash(skb, hash, true); return 0; } static const struct bpf_func_proto bpf_set_hash_proto = { .func = bpf_set_hash, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, u16, vlan_tci) { int ret; if (unlikely(vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD))) vlan_proto = htons(ETH_P_8021Q); bpf_push_mac_rcsum(skb); ret = skb_vlan_push(skb, vlan_proto, vlan_tci); bpf_pull_mac_rcsum(skb); skb_reset_mac_len(skb); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_vlan_push_proto = { .func = bpf_skb_vlan_push, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) { int ret; bpf_push_mac_rcsum(skb); ret = skb_vlan_pop(skb); bpf_pull_mac_rcsum(skb); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { .func = bpf_skb_vlan_pop, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; static void bpf_skb_change_protocol(struct sk_buff *skb, u16 proto) { skb->protocol = htons(proto); if (skb_valid_dst(skb)) skb_dst_drop(skb); } static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) { /* Caller already did skb_cow() with meta_len+len as headroom, * so no need to do it here. */ skb_push(skb, len); skb_postpush_data_move(skb, len, off); memset(skb->data + off, 0, len); /* No skb_postpush_rcsum(skb, skb->data + off, len) * needed here as it does not change the skb->csum * result for checksum complete when summing over * zeroed blocks. */ return 0; } static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) { void *old_data; /* skb_ensure_writable() is not needed here, as we're * already working on an uncloned skb. */ if (unlikely(!pskb_may_pull(skb, off + len))) return -ENOMEM; old_data = skb->data; __skb_pull(skb, len); skb_postpull_rcsum(skb, old_data + off, len); skb_postpull_data_move(skb, len, off); return 0; } static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) { bool trans_same = skb->transport_header == skb->network_header; int ret; /* There's no need for __skb_push()/__skb_pull() pair to * get to the start of the mac header as we're guaranteed * to always start from here under eBPF. */ ret = bpf_skb_generic_push(skb, off, len); if (likely(!ret)) { skb->mac_header -= len; skb->network_header -= len; if (trans_same) skb->transport_header = skb->network_header; } return ret; } static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) { bool trans_same = skb->transport_header == skb->network_header; int ret; /* Same here, __skb_push()/__skb_pull() pair not needed. */ ret = bpf_skb_generic_pop(skb, off, len); if (likely(!ret)) { skb->mac_header += len; skb->network_header += len; if (trans_same) skb->transport_header = skb->network_header; } return ret; } static int bpf_skb_proto_4_to_6(struct sk_buff *skb) { const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); const u8 meta_len = skb_metadata_len(skb); u32 off = skb_mac_header_len(skb); int ret; ret = skb_cow(skb, meta_len + len_diff); if (unlikely(ret < 0)) return ret; ret = bpf_skb_net_hdr_push(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. 
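 *
 * (This is the kernel side of e.g. a NAT46 style tc program that
 *  calls bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0) and then
 *  rewrites the headers itself; illustrative usage, not from this
 *  file.)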
*/ if (shinfo->gso_type & SKB_GSO_TCPV4) { shinfo->gso_type &= ~SKB_GSO_TCPV4; shinfo->gso_type |= SKB_GSO_TCPV6; } } bpf_skb_change_protocol(skb, ETH_P_IPV6); skb_clear_hash(skb); return 0; } static int bpf_skb_proto_6_to_4(struct sk_buff *skb) { const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); u32 off = skb_mac_header_len(skb); int ret; ret = skb_unclone(skb, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; ret = bpf_skb_net_hdr_pop(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */ if (shinfo->gso_type & SKB_GSO_TCPV6) { shinfo->gso_type &= ~SKB_GSO_TCPV6; shinfo->gso_type |= SKB_GSO_TCPV4; } } bpf_skb_change_protocol(skb, ETH_P_IP); skb_clear_hash(skb); return 0; } static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) { __be16 from_proto = skb->protocol; if (from_proto == htons(ETH_P_IP) && to_proto == htons(ETH_P_IPV6)) return bpf_skb_proto_4_to_6(skb); if (from_proto == htons(ETH_P_IPV6) && to_proto == htons(ETH_P_IP)) return bpf_skb_proto_6_to_4(skb); return -ENOTSUPP; } BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, u64, flags) { int ret; if (unlikely(flags)) return -EINVAL; /* General idea is that this helper does the basic groundwork * needed for changing the protocol, and eBPF program fills the * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() * and other helpers, rather than passing a raw buffer here. * * The rationale is to keep this minimal and without a need to * deal with raw packet data. F.e. even if we would pass buffers * here, the program still needs to call the bpf_lX_csum_replace() * helpers anyway. Plus, this way we keep also separation of * concerns, since f.e. bpf_skb_store_bytes() should only take * care of stores. * * Currently, additional options and extension header space are * not supported, but flags register is reserved so we can adapt * that. For offloads, we mark packet as dodgy, so that headers * need to be verified first. */ ret = bpf_skb_proto_xlat(skb, proto); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_change_proto_proto = { .func = bpf_skb_change_proto, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) { /* We only allow a restricted subset to be changed for now. 
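 *
 * skb_pkt_type_ok() limits both the old and the new value to the
 * PACKET_HOST..PACKET_OTHERHOST range; a common (illustrative) use is
 * bpf_skb_change_type(skb, PACKET_HOST) before redirecting a frame to
 * the ingress side of another device.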
*/ if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || !skb_pkt_type_ok(pkt_type))) return -EINVAL; skb->pkt_type = pkt_type; return 0; } static const struct bpf_func_proto bpf_skb_change_type_proto = { .func = bpf_skb_change_type, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; static u32 bpf_skb_net_base_len(const struct sk_buff *skb) { switch (skb->protocol) { case htons(ETH_P_IP): return sizeof(struct iphdr); case htons(ETH_P_IPV6): return sizeof(struct ipv6hdr); default: return ~0U; } } #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) #define BPF_F_ADJ_ROOM_DECAP_L3_MASK (BPF_F_ADJ_ROOM_DECAP_L3_IPV4 | \ BPF_F_ADJ_ROOM_DECAP_L3_IPV6) #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \ BPF_F_ADJ_ROOM_ENCAP_L2( \ BPF_ADJ_ROOM_ENCAP_L2_MASK) | \ BPF_F_ADJ_ROOM_DECAP_L3_MASK) static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, u64 flags) { u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT; bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; u16 mac_len = 0, inner_net = 0, inner_trans = 0; const u8 meta_len = skb_metadata_len(skb); unsigned int gso_type = SKB_GSO_DODGY; int ret; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { /* udp gso_size delineates datagrams, only allow if fixed */ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) return -ENOTSUPP; } ret = skb_cow_head(skb, meta_len + len_diff); if (unlikely(ret < 0)) return ret; if (encap) { if (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)) return -ENOTSUPP; if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) return -EINVAL; if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE && flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) return -EINVAL; if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH && inner_mac_len < ETH_HLEN) return -EINVAL; if (skb->encapsulation) return -EALREADY; mac_len = skb->network_header - skb->mac_header; inner_net = skb->network_header; if (inner_mac_len > len_diff) return -EINVAL; inner_trans = skb->transport_header; } ret = bpf_skb_net_hdr_push(skb, off, len_diff); if (unlikely(ret < 0)) return ret; if (encap) { skb->inner_mac_header = inner_net - inner_mac_len; skb->inner_network_header = inner_net; skb->inner_transport_header = inner_trans; if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH) skb_set_inner_protocol(skb, htons(ETH_P_TEB)); else skb_set_inner_protocol(skb, skb->protocol); skb->encapsulation = 1; skb_set_network_header(skb, mac_len); if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) gso_type |= SKB_GSO_UDP_TUNNEL; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE) gso_type |= SKB_GSO_GRE; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) gso_type |= SKB_GSO_IPXIP6; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) gso_type |= SKB_GSO_IPXIP4; if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE || flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) { int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ? 
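/* new outer L3 header is IPv6 */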
sizeof(struct ipv6hdr) : sizeof(struct iphdr); skb_set_transport_header(skb, mac_len + nh_len); } /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) bpf_skb_change_protocol(skb, ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) bpf_skb_change_protocol(skb, ETH_P_IP); } if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* Header must be checked, and gso_segs recomputed. */ shinfo->gso_type |= gso_type; shinfo->gso_segs = 0; /* Due to header growth, MSS needs to be downgraded. * There is a BUG_ON() when segmenting the frag_list with * head_frag true, so linearize the skb after downgrading * the MSS. */ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) { skb_decrease_gso_size(shinfo, len_diff); if (shinfo->frag_list) return skb_linearize(skb); } } return 0; } static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, u64 flags) { int ret; if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_DECAP_L3_MASK | BPF_F_ADJ_ROOM_NO_CSUM_RESET))) return -EINVAL; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { /* udp gso_size delineates datagrams, only allow if fixed */ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) return -ENOTSUPP; } ret = skb_unclone(skb, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; ret = bpf_skb_net_hdr_pop(skb, off, len_diff); if (unlikely(ret < 0)) return ret; /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV6) bpf_skb_change_protocol(skb, ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV4) bpf_skb_change_protocol(skb, ETH_P_IP); if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); /* Due to header shrink, MSS can be upgraded. */ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) skb_increase_gso_size(shinfo, len_diff); /* Header must be checked, and gso_segs recomputed. 
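 * SKB_GSO_DODGY below makes the segmentation code re-validate the now
 * shorter headers before the skb is actually segmented.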
*/ shinfo->gso_type |= SKB_GSO_DODGY; shinfo->gso_segs = 0; } return 0; } #define BPF_SKB_MAX_LEN SKB_MAX_ALLOC BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, u32, mode, u64, flags) { u32 len_diff_abs = abs(len_diff); bool shrink = len_diff < 0; int ret = 0; if (unlikely(flags || mode)) return -EINVAL; if (unlikely(len_diff_abs > 0xfffU)) return -EFAULT; if (!shrink) { ret = skb_cow(skb, len_diff); if (unlikely(ret < 0)) return ret; __skb_push(skb, len_diff_abs); memset(skb->data, 0, len_diff_abs); } else { if (unlikely(!pskb_may_pull(skb, len_diff_abs))) return -ENOMEM; __skb_pull(skb, len_diff_abs); } if (tls_sw_has_ctx_rx(skb->sk)) { struct strp_msg *rxm = strp_msg(skb); rxm->full_len += len_diff; } return ret; } static const struct bpf_func_proto sk_skb_adjust_room_proto = { .func = sk_skb_adjust_room, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, u32, mode, u64, flags) { u32 len_cur, len_diff_abs = abs(len_diff); u32 len_min = bpf_skb_net_base_len(skb); u32 len_max = BPF_SKB_MAX_LEN; __be16 proto = skb->protocol; bool shrink = len_diff < 0; u32 off; int ret; if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK | BPF_F_ADJ_ROOM_NO_CSUM_RESET))) return -EINVAL; if (unlikely(len_diff_abs > 0xfffU)) return -EFAULT; if (unlikely(proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))) return -ENOTSUPP; off = skb_mac_header_len(skb); switch (mode) { case BPF_ADJ_ROOM_NET: off += bpf_skb_net_base_len(skb); break; case BPF_ADJ_ROOM_MAC: break; default: return -ENOTSUPP; } if (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) { if (!shrink) return -EINVAL; switch (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) { case BPF_F_ADJ_ROOM_DECAP_L3_IPV4: len_min = sizeof(struct iphdr); break; case BPF_F_ADJ_ROOM_DECAP_L3_IPV6: len_min = sizeof(struct ipv6hdr); break; default: return -EINVAL; } } len_cur = skb->len - skb_network_offset(skb); if ((shrink && (len_diff_abs >= len_cur || len_cur - len_diff_abs < len_min)) || (!shrink && (skb->len + len_diff_abs > len_max && !skb_is_gso(skb)))) return -ENOTSUPP; ret = shrink ? 
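/* A tc program typically gets here through e.g.
 * bpf_skb_adjust_room(skb, -(s32)sizeof(struct iphdr), BPF_ADJ_ROOM_MAC, 0)
 * to decapsulate, or with a positive len_diff plus BPF_F_ADJ_ROOM_ENCAP_*
 * flags to make room for an outer header (illustrative calls, not from
 * this file).
 */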
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) : bpf_skb_net_grow(skb, off, len_diff_abs, flags); if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET)) __skb_reset_checksum_unnecessary(skb); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_adjust_room_proto = { .func = bpf_skb_adjust_room, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; static u32 __bpf_skb_min_len(const struct sk_buff *skb) { int offset = skb_network_offset(skb); u32 min_len = 0; if (offset > 0) min_len = offset; if (skb_transport_header_was_set(skb)) { offset = skb_transport_offset(skb); if (offset > 0) min_len = offset; } if (skb->ip_summed == CHECKSUM_PARTIAL) { offset = skb_checksum_start_offset(skb) + skb->csum_offset + sizeof(__sum16); if (offset > 0) min_len = offset; } return min_len; } static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) { unsigned int old_len = skb->len; int ret; ret = __skb_grow_rcsum(skb, new_len); if (!ret) memset(skb->data + old_len, 0, new_len - old_len); return ret; } static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) { return __skb_trim_rcsum(skb, new_len); } static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, u64 flags) { u32 max_len = BPF_SKB_MAX_LEN; u32 min_len = __bpf_skb_min_len(skb); int ret; if (unlikely(flags || new_len > max_len || new_len < min_len)) return -EINVAL; if (skb->encapsulation) return -ENOTSUPP; /* The basic idea of this helper is that it's performing the * needed work to either grow or trim an skb, and eBPF program * rewrites the rest via helpers like bpf_skb_store_bytes(), * bpf_lX_csum_replace() and others rather than passing a raw * buffer here. This one is a slow path helper and intended * for replies with control messages. * * Like in bpf_skb_change_proto(), we want to keep this rather * minimal and without protocol specifics so that we are able * to separate concerns as in bpf_skb_store_bytes() should only * be the one responsible for writing buffers. * * It's really expected to be a slow path operation here for * control message replies, so we're implicitly linearizing, * uncloning and drop offloads from the skb by this. 
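 *
 * An illustrative caller (not from this file) is a tc program turning
 * a request into a short reply in place:
 *
 *   if (bpf_skb_change_tail(skb, reply_len, 0))
 *           return TC_ACT_SHOT;
 *   // ... then rewrite headers/payload via bpf_skb_store_bytes()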
*/ ret = __bpf_try_make_writable(skb, skb->len); if (!ret) { if (new_len > skb->len) ret = bpf_skb_grow_rcsum(skb, new_len); else if (new_len < skb->len) ret = bpf_skb_trim_rcsum(skb, new_len); if (!ret && skb_is_gso(skb)) skb_gso_reset(skb); } return ret; } BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { int ret = __bpf_skb_change_tail(skb, new_len, flags); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_change_tail_proto = { .func = bpf_skb_change_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { return __bpf_skb_change_tail(skb, new_len, flags); } static const struct bpf_func_proto sk_skb_change_tail_proto = { .func = sk_skb_change_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, u64 flags) { const u8 meta_len = skb_metadata_len(skb); u32 max_len = BPF_SKB_MAX_LEN; u32 new_len = skb->len + head_room; int ret; if (unlikely(flags || (int)head_room < 0 || (!skb_is_gso(skb) && new_len > max_len) || new_len < skb->len)) return -EINVAL; ret = skb_cow(skb, meta_len + head_room); if (likely(!ret)) { /* Idea for this helper is that we currently only * allow to expand on mac header. This means that * skb->protocol network header, etc, stay as is. * Compared to bpf_skb_change_tail(), we're more * flexible due to not needing to linearize or * reset GSO. Intention for this helper is to be * used by an L3 skb that needs to push mac header * for redirection into L2 device. */ __skb_push(skb, head_room); skb_postpush_data_move(skb, head_room, 0); memset(skb->data, 0, head_room); skb_reset_mac_header(skb); skb_reset_mac_len(skb); } return ret; } BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, u64, flags) { int ret = __bpf_skb_change_head(skb, head_room, flags); bpf_compute_data_pointers(skb); return ret; } static const struct bpf_func_proto bpf_skb_change_head_proto = { .func = bpf_skb_change_head, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, u64, flags) { return __bpf_skb_change_head(skb, head_room, flags); } static const struct bpf_func_proto sk_skb_change_head_proto = { .func = sk_skb_change_head, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) { return xdp_get_buff_len(xdp); } static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = { .func = bpf_xdp_get_buff_len, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff) const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = { .func = bpf_xdp_get_buff_len, .gpl_only = false, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &bpf_xdp_get_buff_len_bpf_ids[0], }; static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) { return xdp_data_meta_unsupported(xdp) ? 
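/* driver does not support data_meta */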
0 : xdp->data - xdp->data_meta; } BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) { void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); unsigned long metalen = xdp_get_metalen(xdp); void *data_start = xdp_frame_end + metalen; void *data = xdp->data + offset; if (unlikely(data < data_start || data > xdp->data_end - ETH_HLEN)) return -EINVAL; if (metalen) memmove(xdp->data_meta + offset, xdp->data_meta, metalen); xdp->data_meta += offset; xdp->data = data; return 0; } static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .func = bpf_xdp_adjust_head, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, unsigned long len, bool flush) { unsigned long ptr_len, ptr_off = 0; skb_frag_t *next_frag, *end_frag; struct skb_shared_info *sinfo; void *src, *dst; u8 *ptr_buf; if (likely(xdp->data_end - xdp->data >= off + len)) { src = flush ? buf : xdp->data + off; dst = flush ? xdp->data + off : buf; memcpy(dst, src, len); return; } sinfo = xdp_get_shared_info_from_buff(xdp); end_frag = &sinfo->frags[sinfo->nr_frags]; next_frag = &sinfo->frags[0]; ptr_len = xdp->data_end - xdp->data; ptr_buf = xdp->data; while (true) { if (off < ptr_off + ptr_len) { unsigned long copy_off = off - ptr_off; unsigned long copy_len = min(len, ptr_len - copy_off); src = flush ? buf : ptr_buf + copy_off; dst = flush ? ptr_buf + copy_off : buf; memcpy(dst, src, copy_len); off += copy_len; len -= copy_len; buf += copy_len; } if (!len || next_frag == end_frag) break; ptr_off += ptr_len; ptr_buf = skb_frag_address(next_frag); ptr_len = skb_frag_size(next_frag); next_frag++; } } void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) { u32 size = xdp->data_end - xdp->data; struct skb_shared_info *sinfo; void *addr = xdp->data; int i; if (unlikely(offset > 0xffff || len > 0xffff)) return ERR_PTR(-EFAULT); if (unlikely(offset + len > xdp_get_buff_len(xdp))) return ERR_PTR(-EINVAL); if (likely(offset < size)) /* linear area */ goto out; sinfo = xdp_get_shared_info_from_buff(xdp); offset -= size; for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ u32 frag_size = skb_frag_size(&sinfo->frags[i]); if (offset < frag_size) { addr = skb_frag_address(&sinfo->frags[i]); size = frag_size; break; } offset -= frag_size; } out: return offset + len <= size ? 
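/* requested range is contiguous within one linear/frag area */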
addr + offset : NULL; } BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset, void *, buf, u32, len) { void *ptr; ptr = bpf_xdp_pointer(xdp, offset, len); if (IS_ERR(ptr)) return PTR_ERR(ptr); if (!ptr) bpf_xdp_copy_buf(xdp, offset, buf, len, false); else memcpy(buf, ptr, len); return 0; } static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { .func = bpf_xdp_load_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len) { return ____bpf_xdp_load_bytes(xdp, offset, buf, len); } BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, void *, buf, u32, len) { void *ptr; ptr = bpf_xdp_pointer(xdp, offset, len); if (IS_ERR(ptr)) return PTR_ERR(ptr); if (!ptr) bpf_xdp_copy_buf(xdp, offset, buf, len, true); else memcpy(ptr, buf, len); return 0; } static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { .func = bpf_xdp_store_bytes, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len) { return ____bpf_xdp_store_bytes(xdp, offset, buf, len); } static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1]; struct xdp_rxq_info *rxq = xdp->rxq; unsigned int tailroom; if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz) return -EOPNOTSUPP; tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag); if (unlikely(offset > tailroom)) return -EINVAL; memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); skb_frag_size_add(frag, offset); sinfo->xdp_frags_size += offset; if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) xsk_buff_get_tail(xdp)->data_end += offset; return 0; } static struct xdp_buff *bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink, bool tail, bool release) { struct xdp_buff *zc_frag = tail ? 
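/* tail adjustment: operate on the last zero-copy fragment */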
xsk_buff_get_tail(xdp) : xsk_buff_get_head(xdp); if (release) { xsk_buff_del_frag(zc_frag); } else { if (tail) zc_frag->data_end -= shrink; else zc_frag->data += shrink; } return zc_frag; } static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag, int shrink, bool tail) { enum xdp_mem_type mem_type = xdp->rxq->mem.type; bool release = skb_frag_size(frag) == shrink; netmem_ref netmem = skb_frag_netmem(frag); struct xdp_buff *zc_frag = NULL; if (mem_type == MEM_TYPE_XSK_BUFF_POOL) { netmem = 0; zc_frag = bpf_xdp_shrink_data_zc(xdp, shrink, tail, release); } if (release) { __xdp_return(netmem, mem_type, false, zc_frag); } else { if (!tail) skb_frag_off_add(frag, shrink); skb_frag_size_sub(frag, shrink); } return release; } static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i, n_frags_free = 0, len_free = 0; if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN)) return -EINVAL; for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) { skb_frag_t *frag = &sinfo->frags[i]; int shrink = min_t(int, offset, skb_frag_size(frag)); len_free += shrink; offset -= shrink; if (bpf_xdp_shrink_data(xdp, frag, shrink, true)) n_frags_free++; } sinfo->nr_frags -= n_frags_free; sinfo->xdp_frags_size -= len_free; if (unlikely(!sinfo->nr_frags)) { xdp_buff_clear_frags_flag(xdp); xdp_buff_clear_frag_pfmemalloc(xdp); xdp->data_end -= offset; } return 0; } BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) { void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ void *data_end = xdp->data_end + offset; if (unlikely(xdp_buff_has_frags(xdp))) { /* non-linear xdp buff */ if (offset < 0) return bpf_xdp_frags_shrink_tail(xdp, -offset); return bpf_xdp_frags_increase_tail(xdp, offset); } /* Notice that xdp_data_hard_end have reserved some tailroom */ if (unlikely(data_end > data_hard_end)) return -EINVAL; if (unlikely(data_end < xdp->data + ETH_HLEN)) return -EINVAL; /* Clear memory area on grow, can contain uninit kernel memory */ if (offset > 0) memset(xdp->data_end, 0, offset); xdp->data_end = data_end; return 0; } static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { .func = bpf_xdp_adjust_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) { void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); void *meta = xdp->data_meta + offset; unsigned long metalen = xdp->data - meta; if (xdp_data_meta_unsupported(xdp)) return -ENOTSUPP; if (unlikely(meta < xdp_frame_end || meta > xdp->data)) return -EINVAL; if (unlikely(xdp_metalen_invalid(metalen))) return -EACCES; xdp->data_meta = meta; return 0; } static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { .func = bpf_xdp_adjust_meta, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; /** * DOC: xdp redirect * * XDP_REDIRECT works by a three-step process, implemented in the functions * below: * * 1. The bpf_redirect() and bpf_redirect_map() helpers will lookup the target * of the redirect and store it (along with some other metadata) in a per-CPU * struct bpf_redirect_info. * * 2. When the program returns the XDP_REDIRECT return code, the driver will * call xdp_do_redirect() which will use the information in struct * bpf_redirect_info to actually enqueue the frame into a map type-specific * bulk queue structure. * * 3. 
Before exiting its NAPI poll loop, the driver will call * xdp_do_flush(), which will flush all the different bulk queues, * thus completing the redirect. Note that xdp_do_flush() must be * called before napi_complete_done() in the driver, as the * XDP_REDIRECT logic relies on being inside a single NAPI instance * through to the xdp_do_flush() call for RCU protection of all * in-kernel data structures. */ /* * Pointers to the map entries will be kept around for this whole sequence of * steps, protected by RCU. However, there is no top-level rcu_read_lock() in * the core code; instead, the RCU protection relies on everything happening * inside a single NAPI poll sequence, which means it's between a pair of calls * to local_bh_disable()/local_bh_enable(). * * The map entries are marked as __rcu and the map code makes sure to * dereference those pointers with rcu_dereference_check() in a way that works * for both sections that hold an rcu_read_lock() and sections that are * called from NAPI without a separate rcu_read_lock(). The code below does not * use RCU annotations, but relies on those in the map code. */ void xdp_do_flush(void) { struct list_head *lh_map, *lh_dev, *lh_xsk; bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk); if (lh_dev) __dev_flush(lh_dev); if (lh_map) __cpu_map_flush(lh_map); if (lh_xsk) __xsk_map_flush(lh_xsk); } EXPORT_SYMBOL_GPL(xdp_do_flush); #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL) void xdp_do_check_flushed(struct napi_struct *napi) { struct list_head *lh_map, *lh_dev, *lh_xsk; bool missed = false; bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk); if (lh_dev) { __dev_flush(lh_dev); missed = true; } if (lh_map) { __cpu_map_flush(lh_map); missed = true; } if (lh_xsk) { __xsk_map_flush(lh_xsk); missed = true; } WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by %ps\n", napi->poll); } #endif DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key); u32 xdp_master_redirect(struct xdp_buff *xdp) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); struct net_device *master, *slave; master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev); slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp); if (slave && slave != xdp->rxq->dev) { /* The target device is different from the receiving device, so * redirect it to the new device. * Using XDP_REDIRECT gets the correct behaviour from XDP enabled * drivers to unmap the packet from their rx ring.
*/ ri->tgt_index = slave->ifindex; ri->map_id = INT_MAX; ri->map_type = BPF_MAP_TYPE_UNSPEC; return XDP_REDIRECT; } return XDP_TX; } EXPORT_SYMBOL_GPL(xdp_master_redirect); static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri, const struct net_device *dev, struct xdp_buff *xdp, const struct bpf_prog *xdp_prog) { enum bpf_map_type map_type = ri->map_type; void *fwd = ri->tgt_value; u32 map_id = ri->map_id; int err; ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ ri->map_type = BPF_MAP_TYPE_UNSPEC; err = __xsk_map_redirect(fwd, xdp); if (unlikely(err)) goto err; _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); return 0; err: _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); return err; } static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, struct net_device *dev, struct xdp_frame *xdpf, const struct bpf_prog *xdp_prog) { enum bpf_map_type map_type = ri->map_type; void *fwd = ri->tgt_value; u32 map_id = ri->map_id; u32 flags = ri->flags; struct bpf_map *map; int err; ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ ri->flags = 0; ri->map_type = BPF_MAP_TYPE_UNSPEC; if (unlikely(!xdpf)) { err = -EOVERFLOW; goto err; } switch (map_type) { case BPF_MAP_TYPE_DEVMAP: fallthrough; case BPF_MAP_TYPE_DEVMAP_HASH: if (unlikely(flags & BPF_F_BROADCAST)) { map = READ_ONCE(ri->map); /* The map pointer is cleared when the map is being torn * down by dev_map_free() */ if (unlikely(!map)) { err = -ENOENT; break; } WRITE_ONCE(ri->map, NULL); err = dev_map_enqueue_multi(xdpf, dev, map, flags & BPF_F_EXCLUDE_INGRESS); } else { err = dev_map_enqueue(fwd, xdpf, dev); } break; case BPF_MAP_TYPE_CPUMAP: err = cpu_map_enqueue(fwd, xdpf, dev); break; case BPF_MAP_TYPE_UNSPEC: if (map_id == INT_MAX) { fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); if (unlikely(!fwd)) { err = -EINVAL; break; } err = dev_xdp_enqueue(fwd, xdpf, dev); break; } fallthrough; default: err = -EBADRQC; } if (unlikely(err)) goto err; _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); return 0; err: _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); return err; } int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, const struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); enum bpf_map_type map_type = ri->map_type; if (map_type == BPF_MAP_TYPE_XSKMAP) return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp), xdp_prog); } EXPORT_SYMBOL_GPL(xdp_do_redirect); int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp, struct xdp_frame *xdpf, const struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); enum bpf_map_type map_type = ri->map_type; if (map_type == BPF_MAP_TYPE_XSKMAP) return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog); } EXPORT_SYMBOL_GPL(xdp_do_redirect_frame); static int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, const struct bpf_prog *xdp_prog, void *fwd, enum bpf_map_type map_type, u32 map_id, u32 flags) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); struct bpf_map *map; int err; switch (map_type) { case BPF_MAP_TYPE_DEVMAP: fallthrough; case BPF_MAP_TYPE_DEVMAP_HASH: if (unlikely(flags & BPF_F_BROADCAST)) { map = READ_ONCE(ri->map); /* The map pointer is cleared when 
the map is being torn * down by dev_map_free() */ if (unlikely(!map)) { err = -ENOENT; break; } WRITE_ONCE(ri->map, NULL); err = dev_map_redirect_multi(dev, skb, xdp_prog, map, flags & BPF_F_EXCLUDE_INGRESS); } else { err = dev_map_generic_redirect(fwd, skb, xdp_prog); } if (unlikely(err)) goto err; break; case BPF_MAP_TYPE_XSKMAP: err = xsk_generic_rcv(fwd, xdp); if (err) goto err; consume_skb(skb); break; case BPF_MAP_TYPE_CPUMAP: err = cpu_map_generic_redirect(fwd, skb); if (unlikely(err)) goto err; break; default: err = -EBADRQC; goto err; } _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); return 0; err: _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); return err; } int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, const struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); enum bpf_map_type map_type = ri->map_type; void *fwd = ri->tgt_value; u32 map_id = ri->map_id; u32 flags = ri->flags; int err; ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ ri->flags = 0; ri->map_type = BPF_MAP_TYPE_UNSPEC; if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); if (unlikely(!fwd)) { err = -EINVAL; goto err; } err = xdp_ok_fwd_dev(fwd, skb->len); if (unlikely(err)) goto err; skb->dev = fwd; _trace_xdp_redirect(dev, xdp_prog, ri->tgt_index); generic_xdp_tx(skb, xdp_prog); return 0; } return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags); err: _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err); return err; } BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely(flags)) return XDP_ABORTED; /* NB! Map type UNSPEC and map_id == INT_MAX (never generated * by map_idr) is used for ifindex based XDP redirect. 
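 *
 * This is what a plain bpf_redirect(ifindex, 0) from an XDP program
 * maps to; map based redirects go through bpf_redirect_map() instead
 * and keep their real map_type/map_id (usage note, not from this
 * file).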
*/ ri->tgt_index = ifindex; ri->map_id = INT_MAX; ri->map_type = BPF_MAP_TYPE_UNSPEC; return XDP_REDIRECT; } static const struct bpf_func_proto bpf_xdp_redirect_proto = { .func = bpf_xdp_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u64, key, u64, flags) { return map->ops->map_redirect(map, key, flags); } static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { .func = bpf_xdp_redirect_map, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, unsigned long off, unsigned long len) { void *ptr = skb_header_pointer(skb, off, len, dst_buff); if (unlikely(!ptr)) return len; if (ptr != dst_buff) memcpy(dst_buff, ptr, len); return 0; } BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, u64, flags, void *, meta, u64, meta_size) { u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; if (unlikely(!skb || skb_size > skb->len)) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, bpf_skb_copy); } static const struct bpf_func_proto bpf_skb_event_output_proto = { .func = bpf_skb_event_output, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff) const struct bpf_func_proto bpf_skb_output_proto = { .func = bpf_skb_event_output, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &bpf_skb_output_btf_ids[0], .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; static unsigned short bpf_tunnel_key_af(u64 flags) { return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; } BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, u32, size, u64, flags) { const struct ip_tunnel_info *info = skb_tunnel_info(skb); u8 compat[sizeof(struct bpf_tunnel_key)]; void *to_orig = to; int err; if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_TUNINFO_FLAGS)))) { err = -EINVAL; goto err_clear; } if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { err = -EPROTO; goto err_clear; } if (unlikely(size != sizeof(struct bpf_tunnel_key))) { err = -EINVAL; switch (size) { case offsetof(struct bpf_tunnel_key, local_ipv6[0]): case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): goto set_compat; case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): /* Fixup deprecated structure layouts here, so we have * a common path later on. 
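 *
 * Older programs may pass one of the smaller historical struct sizes;
 * those are funnelled through the on-stack compat buffer so that the
 * fill-in code below only has to deal with the full layout.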
*/ if (ip_tunnel_info_af(info) != AF_INET) goto err_clear; set_compat: to = (struct bpf_tunnel_key *)compat; break; default: goto err_clear; } } to->tunnel_id = be64_to_cpu(info->key.tun_id); to->tunnel_tos = info->key.tos; to->tunnel_ttl = info->key.ttl; if (flags & BPF_F_TUNINFO_FLAGS) to->tunnel_flags = ip_tunnel_flags_to_be16(info->key.tun_flags); else to->tunnel_ext = 0; if (flags & BPF_F_TUNINFO_IPV6) { memcpy(to->remote_ipv6, &info->key.u.ipv6.src, sizeof(to->remote_ipv6)); memcpy(to->local_ipv6, &info->key.u.ipv6.dst, sizeof(to->local_ipv6)); to->tunnel_label = be32_to_cpu(info->key.label); } else { to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); to->local_ipv4 = be32_to_cpu(info->key.u.ipv4.dst); memset(&to->local_ipv6[1], 0, sizeof(__u32) * 3); to->tunnel_label = 0; } if (unlikely(size != sizeof(struct bpf_tunnel_key))) memcpy(to_orig, to, size); return 0; err_clear: memset(to_orig, 0, size); return err; } static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { .func = bpf_skb_get_tunnel_key, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) { const struct ip_tunnel_info *info = skb_tunnel_info(skb); int err; if (unlikely(!info || !ip_tunnel_is_options_present(info->key.tun_flags))) { err = -ENOENT; goto err_clear; } if (unlikely(size < info->options_len)) { err = -ENOMEM; goto err_clear; } ip_tunnel_info_opts_get(to, info); if (size > info->options_len) memset(to + info->options_len, 0, size - info->options_len); return info->options_len; err_clear: memset(to, 0, size); return err; } static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { .func = bpf_skb_get_tunnel_opt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, }; static struct metadata_dst __percpu *md_dst; BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, const struct bpf_tunnel_key *, from, u32, size, u64, flags) { struct metadata_dst *md = this_cpu_ptr(md_dst); u8 compat[sizeof(struct bpf_tunnel_key)]; struct ip_tunnel_info *info; if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER | BPF_F_NO_TUNNEL_KEY))) return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { case offsetof(struct bpf_tunnel_key, local_ipv6[0]): case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): /* Fixup deprecated structure layouts here, so we have * a common path later on. 
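 *
 * Illustrative usage (not from this file): a tc program on a
 * collect_md tunnel device sets the egress tunnel metadata via
 *
 *   struct bpf_tunnel_key key = { .tunnel_id = 42, .remote_ipv4 = dst };
 *   bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
 *
 * where dst is a host byte order IPv4 address chosen by the program.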
*/ memcpy(compat, from, size); memset(compat + size, 0, sizeof(compat) - size); from = (const struct bpf_tunnel_key *) compat; break; default: return -EINVAL; } } if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || from->tunnel_ext)) return -EINVAL; skb_dst_drop(skb); dst_hold((struct dst_entry *) md); skb_dst_set(skb, (struct dst_entry *) md); info = &md->u.tun_info; memset(info, 0, sizeof(*info)); info->mode = IP_TUNNEL_INFO_TX; __set_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags); __assign_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags, flags & BPF_F_DONT_FRAGMENT); __assign_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags, !(flags & BPF_F_ZERO_CSUM_TX)); __assign_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags, flags & BPF_F_SEQ_NUMBER); __assign_bit(IP_TUNNEL_KEY_BIT, info->key.tun_flags, !(flags & BPF_F_NO_TUNNEL_KEY)); info->key.tun_id = cpu_to_be64(from->tunnel_id); info->key.tos = from->tunnel_tos; info->key.ttl = from->tunnel_ttl; if (flags & BPF_F_TUNINFO_IPV6) { info->mode |= IP_TUNNEL_INFO_IPV6; memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, sizeof(from->remote_ipv6)); memcpy(&info->key.u.ipv6.src, from->local_ipv6, sizeof(from->local_ipv6)); info->key.label = |