Total coverage: 55286 (5%) of 1157280
// SPDX-License-Identifier: GPL-2.0+
/*
 * PlayStation 2 Trance Vibrator driver
 *
 * Copyright (C) 2006 Sam Hocevar <sam@zoy.org>
 */

/* Standard include files */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>

#define DRIVER_AUTHOR "Sam Hocevar, sam@zoy.org"
#define DRIVER_DESC "PlayStation 2 Trance Vibrator driver"

#define TRANCEVIBRATOR_VENDOR_ID        0x0b49  /* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID       0x064f  /* Trance Vibrator */

static const struct usb_device_id id_table[] = {
        { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
        { },
};
MODULE_DEVICE_TABLE(usb, id_table);

/* Driver-local specific stuff */
struct trancevibrator {
        struct usb_device *udev;
        unsigned int speed;
};

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct usb_interface *intf = to_usb_interface(dev);
        struct trancevibrator *tv = usb_get_intfdata(intf);

        return sprintf(buf, "%d\n", tv->speed);
}

static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        struct usb_interface *intf = to_usb_interface(dev);
        struct trancevibrator *tv = usb_get_intfdata(intf);
        int temp, retval, old;

        retval = kstrtoint(buf, 10, &temp);
        if (retval)
                return retval;
        if (temp > 255)
                temp = 255;
        else if (temp < 0)
                temp = 0;
        old = tv->speed;
        tv->speed = temp;

        dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed);

        /* Set speed */
        retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
                                 0x01, /* vendor request: set speed */
                                 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                                 tv->speed, /* speed value */
                                 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
        if (retval) {
                tv->speed = old;
                dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
                return retval;
        }
        return count;
}
static DEVICE_ATTR_RW(speed);

static struct attribute *tv_attrs[] = {
        &dev_attr_speed.attr,
        NULL,
};
ATTRIBUTE_GROUPS(tv);

static int tv_probe(struct usb_interface *interface,
                    const struct usb_device_id *id)
{
        struct usb_device *udev = interface_to_usbdev(interface);
        struct trancevibrator *dev;
        int retval;

        dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL);
        if (!dev) {
                retval = -ENOMEM;
                goto error;
        }

        dev->udev = usb_get_dev(udev);
        usb_set_intfdata(interface, dev);

        return 0;

error:
        kfree(dev);
        return retval;
}

static void tv_disconnect(struct usb_interface *interface)
{
        struct trancevibrator *dev;

        dev = usb_get_intfdata(interface);
        usb_set_intfdata(interface, NULL);
        usb_put_dev(dev->udev);
        kfree(dev);
}

/* USB subsystem object */
static struct usb_driver tv_driver = {
        .name =         "trancevibrator",
        .probe =        tv_probe,
        .disconnect =   tv_disconnect,
        .id_table =     id_table,
        .dev_groups =   tv_groups,
};

module_usb_driver(tv_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
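The driver above exposes a single read/write sysfs attribute, "speed" (clamped to 0..255), created on the bound USB interface through DEVICE_ATTR_RW(speed) and the tv_groups attribute group. The short userspace sketch below shows how that attribute would typically be driven; the sysfs path and the helper program are illustrative assumptions, since the real node name depends on where the device enumerates on the bus.

/*
 * Userspace sketch (not part of the driver): write a speed value to the
 * "speed" attribute created above. The path is an example only; look under
 * /sys/bus/usb/drivers/trancevibrator/ for the bound interface.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] :
                "/sys/bus/usb/drivers/trancevibrator/1-1:1.0/speed"; /* example */
        int speed = argc > 2 ? atoi(argv[2]) : 128;
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* speed_store() parses the decimal value, clamps it to 0..255 and
         * forwards it to the device as a vendor control request. */
        fprintf(f, "%d\n", speed);
        fclose(f);
        return 0;
}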
// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>
#include <linux/numa_memblks.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;

        if (!strncmp(opt, "off", 3))
                numa_off = 1;
        if (!strncmp(opt, "fake=", 5))
                return numa_emu_cmdline(opt + 5);
        if (!strncmp(opt, "noacpi", 6))
                disable_srat();
        if (!strncmp(opt, "nohmat", 6))
                disable_hmat();

        return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
        [0 ...
MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; int numa_cpu_node(int cpu) { u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); if (apicid != BAD_APICID) return __apicid_to_node[apicid]; return NUMA_NO_NODE; } cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); /* * Map cpu index to node index */ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); void numa_set_node(int cpu, int node) { int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); /* early setting, no percpu area yet */ if (cpu_to_node_map) { cpu_to_node_map[cpu] = node; return; } #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); dump_stack(); return; } #endif per_cpu(x86_cpu_to_node_map, cpu) = node; set_cpu_numa_node(cpu, node); } void numa_clear_node(int cpu) { numa_set_node(cpu, NUMA_NO_NODE); } /* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * * Note: cpumask_of_node() is not valid until after this is done. * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) */ void __init setup_node_to_cpumask_map(void) { unsigned int node; /* setup nr_node_ids if not done yet */ if (nr_node_ids == MAX_NUMNODES) setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init numa_register_nodes(void) { int nid; if (!memblock_validate_numa_coverage(SZ_1M)) return -EINVAL; /* Finally register nodes. */ for_each_node_mask(nid, node_possible_map) { unsigned long start_pfn, end_pfn; /* * Note, get_pfn_range_for_nid() depends on * memblock_set_node() having already happened */ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (start_pfn >= end_pfn) continue; alloc_node_data(nid); node_set_online(nid); } /* Dump memblock with node info and return. */ memblock_dump_all(); return 0; } /* * There are unfortunately some poorly designed mainboards around that * only connect memory to a single CPU. This breaks the 1:1 cpu->node * mapping. To avoid this fill in the mapping for all possible CPUs, * as the number of CPUs is not known yet. We round robin the existing * nodes. */ static void __init numa_init_array(void) { int rr, i; rr = first_node(node_online_map); for (i = 0; i < nr_cpu_ids; i++) { if (early_cpu_to_node(i) != NUMA_NO_NODE) continue; numa_set_node(i, rr); rr = next_node_in(rr, node_online_map); } } static int __init numa_init(int (*init_func)(void)) { int i; int ret; for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE); ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true); if (ret < 0) return ret; ret = numa_register_nodes(); if (ret < 0) return ret; for (i = 0; i < nr_cpu_ids; i++) { int nid = early_cpu_to_node(i); if (nid == NUMA_NO_NODE) continue; if (!node_online(nid)) numa_clear_node(i); } numa_init_array(); return 0; } /** * dummy_numa_init - Fallback dummy NUMA init * * Used if there's no underlying NUMA architecture, NUMA initialization * fails, or NUMA is disabled on the command line. * * Must online at least one node and add memory blocks that cover all * allowed memory. This function must not fail. */ static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n", 0LLU, PFN_PHYS(max_pfn) - 1); node_set(0, numa_nodes_parsed); numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); return 0; } /** * x86_numa_init - Initialize NUMA * * Try each configured NUMA initialization method until one succeeds. The * last fallback is dummy single node config encompassing whole memory and * never fails. */ void __init x86_numa_init(void) { if (!numa_off) { #ifdef CONFIG_ACPI_NUMA if (!numa_init(x86_acpi_numa_init)) return; #endif #ifdef CONFIG_AMD_NUMA if (!numa_init(amd_numa_init)) return; #endif if (acpi_disabled && !numa_init(of_numa_init)) return; } numa_init(dummy_numa_init); } /* * A node may exist which has one or more Generic Initiators but no CPUs and no * memory. * * This function must be called after init_cpu_to_node(), to ensure that any * memoryless CPU nodes have already been brought online, and before the * node_data[nid] is needed for zone list setup in build_all_zonelists(). * * When this function is called, any nodes containing either memory and/or CPUs * will already be online and there is no need to do anything extra, even if * they also contain one or more Generic Initiators. */ void __init init_gi_nodes(void) { int nid; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ for_each_node_state(nid, N_GENERIC_INITIATOR) if (!node_online(nid)) node_set_online(nid); } /* * Setup early cpu_to_node. * * Populate cpu_to_node[] only if x86_cpu_to_apicid[], * and apicid_to_node[] tables have valid entries for a CPU. * This means we skip cpu_to_node[] initialisation for NUMA * emulation and faking node case (when running a kernel compiled * for NUMA on a non NUMA box), which is OK as cpu_to_node[] * is already initialized in a round robin manner at numa_init_array, * prior to this call, and this initialization is good enough * for the fake NUMA cases. * * Called before the per_cpu areas are setup. */ void __init init_cpu_to_node(void) { int cpu; u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); BUG_ON(cpu_to_apicid == NULL); for_each_possible_cpu(cpu) { int node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE) continue; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ if (!node_online(node)) node_set_online(node); numa_set_node(cpu, node); } } #ifndef CONFIG_DEBUG_PER_CPU_MAPS # ifndef CONFIG_NUMA_EMU void numa_add_cpu(unsigned int cpu) { cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } void numa_remove_cpu(unsigned int cpu) { cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } # endif /* !CONFIG_NUMA_EMU */ #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ int __cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) { printk(KERN_WARNING "cpu_to_node(%d): usage too early!\n", cpu); dump_stack(); return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; } return per_cpu(x86_cpu_to_node_map, cpu); } EXPORT_SYMBOL(__cpu_to_node); /* * Same function as cpu_to_node() but used if called before the * per_cpu areas are setup. 
*/ int early_cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; if (!cpu_possible(cpu)) { printk(KERN_WARNING "early_cpu_to_node(%d): no per_cpu area!\n", cpu); dump_stack(); return NUMA_NO_NODE; } return per_cpu(x86_cpu_to_node_map, cpu); } void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable) { struct cpumask *mask; if (node == NUMA_NO_NODE) { /* early_cpu_to_node() already emits a warning and trace */ return; } mask = node_to_cpumask_map[node]; if (!cpumask_available(mask)) { pr_err("node_to_cpumask_map[%i] NULL\n", node); dump_stack(); return; } if (enable) cpumask_set_cpu(cpu, mask); else cpumask_clear_cpu(cpu, mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n", enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, cpumask_pr_args(mask)); return; } # ifndef CONFIG_NUMA_EMU static void numa_set_cpumask(int cpu, bool enable) { debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); } void numa_add_cpu(unsigned int cpu) { numa_set_cpumask(cpu, true); } void numa_remove_cpu(unsigned int cpu) { numa_set_cpumask(cpu, false); } # endif /* !CONFIG_NUMA_EMU */ /* * Returns a pointer to the bitmask of CPUs on Node 'node'. */ const struct cpumask *cpumask_of_node(int node) { if ((unsigned)node >= nr_node_ids) { printk(KERN_WARNING "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n", node, nr_node_ids); dump_stack(); return cpu_none_mask; } if (!cpumask_available(node_to_cpumask_map[node])) { printk(KERN_WARNING "cpumask_of_node(%d): no node_to_cpumask_map!\n", node); dump_stack(); return cpu_online_mask; } return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ #ifdef CONFIG_NUMA_EMU void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, unsigned int nr_emu_nids) { int i, j; /* * Transform __apicid_to_node table to use emulated nids by * reverse-mapping phys_nid. The maps should always exist but fall * back to zero just in case. */ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { if (__apicid_to_node[i] == NUMA_NO_NODE) continue; for (j = 0; j < nr_emu_nids; j++) if (__apicid_to_node[i] == emu_nid_to_phys[j]) break; __apicid_to_node[i] = j < nr_emu_nids ? j : 0; } } u64 __init numa_emu_dma_end(void) { return PFN_PHYS(MAX_DMA32_PFN); } #endif /* CONFIG_NUMA_EMU */
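numa_init_array() above handles boards where some CPUs have no usable affinity information: every CPU still mapped to NUMA_NO_NODE is assigned a node by cycling round-robin through the online nodes. The self-contained sketch below models only that fallback with plain arrays; NR_CPUS_DEMO, NR_NODES_DEMO and the sample mapping are illustrative stand-ins for nr_cpu_ids, node_online_map and the early cpu-to-node map, not kernel API.

/*
 * Standalone model of the round-robin cpu->node fallback (illustrative,
 * not kernel code).
 */
#include <stdio.h>

#define NO_NODE         (-1)
#define NR_CPUS_DEMO    8
#define NR_NODES_DEMO   3

int main(void)
{
        int cpu_to_node[NR_CPUS_DEMO] = {
                0, NO_NODE, NO_NODE, 1, NO_NODE, NO_NODE, NO_NODE, 2 };
        int rr = 0;     /* analogue of first_node(node_online_map) */
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
                if (cpu_to_node[cpu] != NO_NODE)
                        continue;       /* already known, e.g. from SRAT */
                cpu_to_node[cpu] = rr;
                rr = (rr + 1) % NR_NODES_DEMO;  /* next_node_in() analogue */
        }

        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
                printf("cpu %d -> node %d\n", cpu, cpu_to_node[cpu]);
        return 0;
}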
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Main USB camera driver
 *
 * Copyright (C) 2008-2011 Jean-François Moine <http://moinejf.free.fr>
 *
 * Camera button input handling by Márton Németh
 * Copyright (C) 2009-2010 Márton Németh <nm127@freemail.hu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define GSPCA_VERSION   "2.14.0"

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include "gspca.h"

#if IS_ENABLED(CONFIG_INPUT)
#include <linux/input.h>
#include <linux/usb/input.h>
#endif

/* global values */
#define DEF_NURBS 3             /* default number of URBs */
#if DEF_NURBS > MAX_NURBS
#error "DEF_NURBS too big"
#endif

MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(GSPCA_VERSION);

int gspca_debug;
EXPORT_SYMBOL(gspca_debug);

static void PDEBUG_MODE(struct gspca_dev *gspca_dev, int debug, char *txt,
                        __u32 pixfmt, int w, int h)
{
        if ((pixfmt >> 24) >= '0' && (pixfmt >> 24) <= 'z') {
                gspca_dbg(gspca_dev, debug, "%s %c%c%c%c %dx%d\n",
                          txt,
                          pixfmt & 0xff,
                          (pixfmt >> 8) & 0xff,
                          (pixfmt >> 16) & 0xff,
                          pixfmt >> 24,
                          w, h);
        } else {
                gspca_dbg(gspca_dev, debug, "%s 0x%08x %dx%d\n",
                          txt,
                          pixfmt,
                          w, h);
        }
}

/* specific memory types - !! should be different from V4L2_MEMORY_xxx */
#define GSPCA_MEMORY_NO 0       /* V4L2_MEMORY_xxx starts from 1 */
#define GSPCA_MEMORY_READ 7

/*
 * Input and interrupt endpoint handling functions
 */
#if IS_ENABLED(CONFIG_INPUT)
static void int_irq(struct urb *urb)
{
        struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
        int ret;

        ret = urb->status;
        switch (ret) {
        case 0:
                if (gspca_dev->sd_desc->int_pkt_scan(gspca_dev,
                    urb->transfer_buffer, urb->actual_length) < 0) {
                        gspca_err(gspca_dev, "Unknown packet received\n");
                }
                break;

        case -ENOENT:
        case -ECONNRESET:
        case -ENODEV:
        case -ESHUTDOWN:
                /* Stop is requested either by software or hardware is gone,
                 * keep the ret value non-zero and don't resubmit later.
*/ break; default: gspca_err(gspca_dev, "URB error %i, resubmitting\n", urb->status); urb->status = 0; ret = 0; } if (ret == 0) { ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) pr_err("Resubmit URB failed with error %i\n", ret); } } static int gspca_input_connect(struct gspca_dev *dev) { struct input_dev *input_dev; int err = 0; dev->input_dev = NULL; if (dev->sd_desc->int_pkt_scan || dev->sd_desc->other_input) { input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; usb_make_path(dev->dev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); input_dev->name = dev->sd_desc->name; input_dev->phys = dev->phys; usb_to_input_id(dev->dev, &input_dev->id); input_dev->evbit[0] = BIT_MASK(EV_KEY); input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA); input_dev->dev.parent = &dev->dev->dev; err = input_register_device(input_dev); if (err) { pr_err("Input device registration failed with error %i\n", err); input_dev->dev.parent = NULL; input_free_device(input_dev); } else { dev->input_dev = input_dev; } } return err; } static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev, struct usb_endpoint_descriptor *ep) { unsigned int buffer_len; int interval; struct urb *urb; struct usb_device *dev; void *buffer = NULL; int ret = -EINVAL; buffer_len = le16_to_cpu(ep->wMaxPacketSize); interval = ep->bInterval; gspca_dbg(gspca_dev, D_CONF, "found int in endpoint: 0x%x, buffer_len=%u, interval=%u\n", ep->bEndpointAddress, buffer_len, interval); dev = gspca_dev->dev; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { ret = -ENOMEM; goto error; } buffer = usb_alloc_coherent(dev, buffer_len, GFP_KERNEL, &urb->transfer_dma); if (!buffer) { ret = -ENOMEM; goto error_buffer; } usb_fill_int_urb(urb, dev, usb_rcvintpipe(dev, ep->bEndpointAddress), buffer, buffer_len, int_irq, (void *)gspca_dev, interval); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret < 0) { gspca_err(gspca_dev, "submit int URB failed with error %i\n", ret); goto error_submit; } gspca_dev->int_urb = urb; return ret; error_submit: usb_free_coherent(dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); error_buffer: usb_free_urb(urb); error: return ret; } static void gspca_input_create_urb(struct gspca_dev *gspca_dev) { struct usb_interface *intf; struct usb_host_interface *intf_desc; struct usb_endpoint_descriptor *ep; int i; if (gspca_dev->sd_desc->int_pkt_scan) { intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); intf_desc = intf->cur_altsetting; for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep = &intf_desc->endpoint[i].desc; if (usb_endpoint_dir_in(ep) && usb_endpoint_xfer_int(ep)) { alloc_and_submit_int_urb(gspca_dev, ep); break; } } } } static void gspca_input_destroy_urb(struct gspca_dev *gspca_dev) { struct urb *urb; urb = gspca_dev->int_urb; if (urb) { gspca_dev->int_urb = NULL; usb_kill_urb(urb); usb_free_coherent(gspca_dev->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } } #else static inline void gspca_input_destroy_urb(struct gspca_dev *gspca_dev) { } static inline void gspca_input_create_urb(struct gspca_dev *gspca_dev) { } static inline int gspca_input_connect(struct gspca_dev *dev) { return 0; } #endif /* * fill a video frame from an URB and resubmit */ static void fill_frame(struct gspca_dev *gspca_dev, struct urb *urb) { u8 *data; /* address of data in the iso message */ int i, len, st; cam_pkt_op pkt_scan; if (urb->status != 0) { if 
(urb->status == -ESHUTDOWN) return; /* disconnection */ #ifdef CONFIG_PM if (gspca_dev->frozen) return; #endif gspca_err(gspca_dev, "urb status: %d\n", urb->status); urb->status = 0; goto resubmit; } pkt_scan = gspca_dev->sd_desc->pkt_scan; for (i = 0; i < urb->number_of_packets; i++) { len = urb->iso_frame_desc[i].actual_length; /* check the packet status and length */ st = urb->iso_frame_desc[i].status; if (st) { gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n", i, len, st); gspca_dev->last_packet_type = DISCARD_PACKET; continue; } if (len == 0) { if (gspca_dev->empty_packet == 0) gspca_dev->empty_packet = 1; continue; } /* let the packet be analyzed by the subdriver */ gspca_dbg(gspca_dev, D_PACK, "packet [%d] o:%d l:%d\n", i, urb->iso_frame_desc[i].offset, len); data = (u8 *) urb->transfer_buffer + urb->iso_frame_desc[i].offset; pkt_scan(gspca_dev, data, len); } resubmit: if (!gspca_dev->streaming) return; /* resubmit the URB */ st = usb_submit_urb(urb, GFP_ATOMIC); if (st < 0) pr_err("usb_submit_urb() ret %d\n", st); } /* * ISOC message interrupt from the USB device * * Analyse each packet and call the subdriver for copy to the frame buffer. */ static void isoc_irq(struct urb *urb) { struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; gspca_dbg(gspca_dev, D_PACK, "isoc irq\n"); if (!gspca_dev->streaming) return; fill_frame(gspca_dev, urb); } /* * bulk message interrupt from the USB device */ static void bulk_irq(struct urb *urb) { struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; int st; gspca_dbg(gspca_dev, D_PACK, "bulk irq\n"); if (!gspca_dev->streaming) return; switch (urb->status) { case 0: break; case -ESHUTDOWN: return; /* disconnection */ default: #ifdef CONFIG_PM if (gspca_dev->frozen) return; #endif gspca_err(gspca_dev, "urb status: %d\n", urb->status); urb->status = 0; goto resubmit; } gspca_dbg(gspca_dev, D_PACK, "packet l:%d\n", urb->actual_length); gspca_dev->sd_desc->pkt_scan(gspca_dev, urb->transfer_buffer, urb->actual_length); resubmit: if (!gspca_dev->streaming) return; /* resubmit the URB */ if (gspca_dev->cam.bulk_nurbs != 0) { st = usb_submit_urb(urb, GFP_ATOMIC); if (st < 0) pr_err("usb_submit_urb() ret %d\n", st); } } /* * add data to the current frame * * This function is called by the subdrivers at interrupt level. * * To build a frame, these ones must add * - one FIRST_PACKET * - 0 or many INTER_PACKETs * - one LAST_PACKET * DISCARD_PACKET invalidates the whole frame. 
*/ void gspca_frame_add(struct gspca_dev *gspca_dev, enum gspca_packet_type packet_type, const u8 *data, int len) { struct gspca_buffer *buf; unsigned long flags; gspca_dbg(gspca_dev, D_PACK, "add t:%d l:%d\n", packet_type, len); spin_lock_irqsave(&gspca_dev->qlock, flags); buf = list_first_entry_or_null(&gspca_dev->buf_list, typeof(*buf), list); spin_unlock_irqrestore(&gspca_dev->qlock, flags); if (packet_type == FIRST_PACKET) { /* if there is no queued buffer, discard the whole frame */ if (!buf) { gspca_dev->last_packet_type = DISCARD_PACKET; gspca_dev->sequence++; return; } gspca_dev->image = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); gspca_dev->image_len = 0; } else { switch (gspca_dev->last_packet_type) { case DISCARD_PACKET: if (packet_type == LAST_PACKET) { gspca_dev->last_packet_type = packet_type; gspca_dev->image = NULL; gspca_dev->image_len = 0; } return; case LAST_PACKET: return; } } /* append the packet to the frame buffer */ if (len > 0) { if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) { gspca_err(gspca_dev, "frame overflow %d > %d\n", gspca_dev->image_len + len, PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)); packet_type = DISCARD_PACKET; } else { /* !! image is NULL only when last pkt is LAST or DISCARD if (gspca_dev->image == NULL) { pr_err("gspca_frame_add() image == NULL\n"); return; } */ memcpy(gspca_dev->image + gspca_dev->image_len, data, len); gspca_dev->image_len += len; } } gspca_dev->last_packet_type = packet_type; /* if last packet, invalidate packet concatenation until * next first packet, wake up the application and advance * in the queue */ if (packet_type == LAST_PACKET) { if (gspca_dev->image_len > gspca_dev->pixfmt.sizeimage) gspca_dev->image_len = gspca_dev->pixfmt.sizeimage; spin_lock_irqsave(&gspca_dev->qlock, flags); list_del(&buf->list); spin_unlock_irqrestore(&gspca_dev->qlock, flags); buf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->vb.vb2_buf, 0, gspca_dev->image_len); buf->vb.sequence = gspca_dev->sequence++; buf->vb.field = V4L2_FIELD_NONE; gspca_dbg(gspca_dev, D_FRAM, "frame complete len:%d\n", gspca_dev->image_len); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); gspca_dev->image = NULL; gspca_dev->image_len = 0; } } EXPORT_SYMBOL(gspca_frame_add); static void destroy_urbs(struct gspca_dev *gspca_dev) { struct urb *urb; unsigned int i; gspca_dbg(gspca_dev, D_STREAM, "kill transfer\n"); /* Killing all URBs guarantee that no URB completion * handler is running. Therefore, there shouldn't * be anyone trying to access gspca_dev->urb[i] */ for (i = 0; i < MAX_NURBS; i++) usb_kill_urb(gspca_dev->urb[i]); gspca_dbg(gspca_dev, D_STREAM, "releasing urbs\n"); for (i = 0; i < MAX_NURBS; i++) { urb = gspca_dev->urb[i]; if (!urb) continue; gspca_dev->urb[i] = NULL; usb_free_coherent(gspca_dev->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } } static int gspca_set_alt0(struct gspca_dev *gspca_dev) { int ret; if (gspca_dev->alt == 0) return 0; ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0); if (ret < 0) pr_err("set alt 0 err %d\n", ret); return ret; } /* * look for an input transfer endpoint in an alternate setting. * * If xfer_ep is invalid, return the first valid ep found, otherwise * look for exactly the ep with address equal to xfer_ep. 
*/ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt, int xfer, int xfer_ep) { struct usb_host_endpoint *ep; int i, attr; for (i = 0; i < alt->desc.bNumEndpoints; i++) { ep = &alt->endpoint[i]; attr = ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; if (attr == xfer && ep->desc.wMaxPacketSize != 0 && usb_endpoint_dir_in(&ep->desc) && (xfer_ep < 0 || ep->desc.bEndpointAddress == xfer_ep)) return ep; } return NULL; } /* compute the minimum bandwidth for the current transfer */ static u32 which_bandwidth(struct gspca_dev *gspca_dev) { u32 bandwidth; /* get the (max) image size */ bandwidth = gspca_dev->pixfmt.sizeimage; /* if the image is compressed, estimate its mean size */ if (!gspca_dev->cam.needs_full_bandwidth && bandwidth < gspca_dev->pixfmt.width * gspca_dev->pixfmt.height) bandwidth = bandwidth * 3 / 8; /* 0.375 */ /* estimate the frame rate */ if (gspca_dev->sd_desc->get_streamparm) { struct v4l2_streamparm parm; gspca_dev->sd_desc->get_streamparm(gspca_dev, &parm); bandwidth *= parm.parm.capture.timeperframe.denominator; bandwidth /= parm.parm.capture.timeperframe.numerator; } else { /* don't hope more than 15 fps with USB 1.1 and * image resolution >= 640x480 */ if (gspca_dev->pixfmt.width >= 640 && gspca_dev->dev->speed == USB_SPEED_FULL) bandwidth *= 15; /* 15 fps */ else bandwidth *= 30; /* 30 fps */ } gspca_dbg(gspca_dev, D_STREAM, "min bandwidth: %d\n", bandwidth); return bandwidth; } /* endpoint table */ #define MAX_ALT 16 struct ep_tb_s { u32 alt; u32 bandwidth; }; /* * build the table of the endpoints * and compute the minimum bandwidth for the image transfer */ static int build_isoc_ep_tb(struct gspca_dev *gspca_dev, struct usb_interface *intf, struct ep_tb_s *ep_tb) { struct usb_host_endpoint *ep; int i, j, nbalt, psize, found; u32 bandwidth, last_bw; nbalt = intf->num_altsetting; if (nbalt > MAX_ALT) nbalt = MAX_ALT; /* fixme: should warn */ /* build the endpoint table */ i = 0; last_bw = 0; for (;;) { ep_tb->bandwidth = 2000 * 2000 * 120; found = 0; for (j = 0; j < nbalt; j++) { ep = alt_xfer(&intf->altsetting[j], USB_ENDPOINT_XFER_ISOC, gspca_dev->xfer_ep); if (ep == NULL) continue; if (ep->desc.bInterval == 0) { pr_err("alt %d iso endp with 0 interval\n", j); continue; } psize = le16_to_cpu(ep->desc.wMaxPacketSize); psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3)); bandwidth = psize * 1000; if (gspca_dev->dev->speed == USB_SPEED_HIGH || gspca_dev->dev->speed >= USB_SPEED_SUPER) bandwidth *= 8; bandwidth /= 1 << (ep->desc.bInterval - 1); if (bandwidth <= last_bw) continue; if (bandwidth < ep_tb->bandwidth) { ep_tb->bandwidth = bandwidth; ep_tb->alt = j; found = 1; } } if (!found) break; gspca_dbg(gspca_dev, D_STREAM, "alt %d bandwidth %d\n", ep_tb->alt, ep_tb->bandwidth); last_bw = ep_tb->bandwidth; i++; ep_tb++; } /* * If the camera: * has a usb audio class interface (a built in usb mic); and * is a usb 1 full speed device; and * uses the max full speed iso bandwidth; and * and has more than 1 alt setting * then skip the highest alt setting to spare bandwidth for the mic */ if (gspca_dev->audio && gspca_dev->dev->speed == USB_SPEED_FULL && last_bw >= 1000000 && i > 1) { gspca_dbg(gspca_dev, D_STREAM, "dev has usb audio, skipping highest alt\n"); i--; ep_tb--; } /* get the requested bandwidth and start at the highest atlsetting */ bandwidth = which_bandwidth(gspca_dev); ep_tb--; while (i > 1) { ep_tb--; if (ep_tb->bandwidth < bandwidth) break; i--; } return i; } /* * create the URBs for image transfer */ static int create_urbs(struct 
gspca_dev *gspca_dev, struct usb_host_endpoint *ep) { struct urb *urb; int n, nurbs, i, psize, npkt, bsize; /* calculate the packet size and the number of packets */ psize = le16_to_cpu(ep->desc.wMaxPacketSize); if (!gspca_dev->cam.bulk) { /* isoc */ /* See paragraph 5.9 / table 5-11 of the usb 2.0 spec. */ if (gspca_dev->pkt_size == 0) psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3)); else psize = gspca_dev->pkt_size; npkt = gspca_dev->cam.npkt; if (npkt == 0) npkt = 32; /* default value */ bsize = psize * npkt; gspca_dbg(gspca_dev, D_STREAM, "isoc %d pkts size %d = bsize:%d\n", npkt, psize, bsize); nurbs = DEF_NURBS; } else { /* bulk */ npkt = 0; bsize = gspca_dev->cam.bulk_size; if (bsize == 0) bsize = psize; gspca_dbg(gspca_dev, D_STREAM, "bulk bsize:%d\n", bsize); if (gspca_dev->cam.bulk_nurbs != 0) nurbs = gspca_dev->cam.bulk_nurbs; else nurbs = 1; } for (n = 0; n < nurbs; n++) { urb = usb_alloc_urb(npkt, GFP_KERNEL); if (!urb) return -ENOMEM; gspca_dev->urb[n] = urb; urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev, bsize, GFP_KERNEL, &urb->transfer_dma); if (urb->transfer_buffer == NULL) { pr_err("usb_alloc_coherent failed\n"); return -ENOMEM; } urb->dev = gspca_dev->dev; urb->context = gspca_dev; urb->transfer_buffer_length = bsize; if (npkt != 0) { /* ISOC */ urb->pipe = usb_rcvisocpipe(gspca_dev->dev, ep->desc.bEndpointAddress); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->interval = 1 << (ep->desc.bInterval - 1); urb->complete = isoc_irq; urb->number_of_packets = npkt; for (i = 0; i < npkt; i++) { urb->iso_frame_desc[i].length = psize; urb->iso_frame_desc[i].offset = psize * i; } } else { /* bulk */ urb->pipe = usb_rcvbulkpipe(gspca_dev->dev, ep->desc.bEndpointAddress); urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->complete = bulk_irq; } } return 0; } /* Note: both the queue and the usb locks should be held when calling this */ static void gspca_stream_off(struct gspca_dev *gspca_dev) { gspca_dev->streaming = false; gspca_dev->usb_err = 0; if (gspca_dev->sd_desc->stopN) gspca_dev->sd_desc->stopN(gspca_dev); destroy_urbs(gspca_dev); gspca_input_destroy_urb(gspca_dev); gspca_set_alt0(gspca_dev); if (gspca_dev->present) gspca_input_create_urb(gspca_dev); if (gspca_dev->sd_desc->stop0) gspca_dev->sd_desc->stop0(gspca_dev); gspca_dbg(gspca_dev, D_STREAM, "stream off OK\n"); } /* * start the USB transfer */ static int gspca_init_transfer(struct gspca_dev *gspca_dev) { struct usb_interface *intf; struct usb_host_endpoint *ep; struct urb *urb; struct ep_tb_s ep_tb[MAX_ALT]; int n, ret, xfer, alt, alt_idx; /* reset the streaming variables */ gspca_dev->image = NULL; gspca_dev->image_len = 0; gspca_dev->last_packet_type = DISCARD_PACKET; gspca_dev->usb_err = 0; /* do the specific subdriver stuff before endpoint selection */ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0; if (gspca_dev->sd_desc->isoc_init) { ret = gspca_dev->sd_desc->isoc_init(gspca_dev); if (ret < 0) return ret; } xfer = gspca_dev->cam.bulk ? 
USB_ENDPOINT_XFER_BULK : USB_ENDPOINT_XFER_ISOC; /* if bulk or the subdriver forced an altsetting, get the endpoint */ if (gspca_dev->alt != 0) { gspca_dev->alt--; /* (previous version compatibility) */ ep = alt_xfer(&intf->altsetting[gspca_dev->alt], xfer, gspca_dev->xfer_ep); if (ep == NULL) { pr_err("bad altsetting %d\n", gspca_dev->alt); return -EIO; } ep_tb[0].alt = gspca_dev->alt; alt_idx = 1; } else { /* else, compute the minimum bandwidth * and build the endpoint table */ alt_idx = build_isoc_ep_tb(gspca_dev, intf, ep_tb); if (alt_idx <= 0) { pr_err("no transfer endpoint found\n"); return -EIO; } } /* set the highest alternate setting and * loop until urb submit succeeds */ gspca_input_destroy_urb(gspca_dev); gspca_dev->alt = ep_tb[--alt_idx].alt; alt = -1; for (;;) { if (alt != gspca_dev->alt) { alt = gspca_dev->alt; if (intf->num_altsetting > 1) { ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, alt); if (ret < 0) { if (ret == -ENOSPC) goto retry; /*fixme: ugly*/ pr_err("set alt %d err %d\n", alt, ret); goto out; } } } if (!gspca_dev->cam.no_urb_create) { gspca_dbg(gspca_dev, D_STREAM, "init transfer alt %d\n", alt); ret = create_urbs(gspca_dev, alt_xfer(&intf->altsetting[alt], xfer, gspca_dev->xfer_ep)); if (ret < 0) { destroy_urbs(gspca_dev); goto out; } } /* clear the bulk endpoint */ if (gspca_dev->cam.bulk) usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe); /* start the cam */ ret = gspca_dev->sd_desc->start(gspca_dev); if (ret < 0) { destroy_urbs(gspca_dev); goto out; } v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler); gspca_dev->streaming = true; /* some bulk transfers are started by the subdriver */ if (gspca_dev->cam.bulk && gspca_dev->cam.bulk_nurbs == 0) break; /* submit the URBs */ for (n = 0; n < MAX_NURBS; n++) { urb = gspca_dev->urb[n]; if (urb == NULL) break; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret < 0) break; } if (ret >= 0) break; /* transfer is started */ /* something when wrong * stop the webcam and free the transfer resources */ gspca_stream_off(gspca_dev); if (ret != -ENOSPC) { pr_err("usb_submit_urb alt %d err %d\n", gspca_dev->alt, ret); goto out; } /* the bandwidth is not wide enough * negotiate or try a lower alternate setting */ retry: gspca_err(gspca_dev, "alt %d - bandwidth not wide enough, trying again\n", alt); msleep(20); /* wait for kill complete */ if (gspca_dev->sd_desc->isoc_nego) { ret = gspca_dev->sd_desc->isoc_nego(gspca_dev); if (ret < 0) goto out; } else { if (alt_idx <= 0) { pr_err("no transfer endpoint found\n"); ret = -EIO; goto out; } gspca_dev->alt = ep_tb[--alt_idx].alt; } } out: gspca_input_create_urb(gspca_dev); return ret; } static void gspca_set_default_mode(struct gspca_dev *gspca_dev) { int i; i = gspca_dev->cam.nmodes - 1; /* take the highest mode */ gspca_dev->curr_mode = i; gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i]; /* does nothing if ctrl_handler == NULL */ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler); } static int wxh_to_mode(struct gspca_dev *gspca_dev, int width, int height, u32 pixelformat) { int i; for (i = 0; i < gspca_dev->cam.nmodes; i++) { if (width == gspca_dev->cam.cam_mode[i].width && height == gspca_dev->cam.cam_mode[i].height && pixelformat == gspca_dev->cam.cam_mode[i].pixelformat) return i; } return -EINVAL; } static int wxh_to_nearest_mode(struct gspca_dev *gspca_dev, int width, int height, u32 pixelformat) { int i; for (i = gspca_dev->cam.nmodes; --i >= 0; ) { if (width >= gspca_dev->cam.cam_mode[i].width && height >= gspca_dev->cam.cam_mode[i].height && pixelformat 
== gspca_dev->cam.cam_mode[i].pixelformat) return i; } for (i = gspca_dev->cam.nmodes; --i > 0; ) { if (width >= gspca_dev->cam.cam_mode[i].width && height >= gspca_dev->cam.cam_mode[i].height) break; } return i; } /* * search a mode with the right pixel format */ static int gspca_get_mode(struct gspca_dev *gspca_dev, int mode, int pixfmt) { int modeU, modeD; modeU = modeD = mode; while ((modeU < gspca_dev->cam.nmodes) || modeD >= 0) { if (--modeD >= 0) { if (gspca_dev->cam.cam_mode[modeD].pixelformat == pixfmt) return modeD; } if (++modeU < gspca_dev->cam.nmodes) { if (gspca_dev->cam.cam_mode[modeU].pixelformat == pixfmt) return modeU; } } return -EINVAL; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_chip_info(struct file *file, void *priv, struct v4l2_dbg_chip_info *chip) { struct gspca_dev *gspca_dev = video_drvdata(file); gspca_dev->usb_err = 0; if (gspca_dev->sd_desc->get_chip_info) return gspca_dev->sd_desc->get_chip_info(gspca_dev, chip); return chip->match.addr ? -EINVAL : 0; } static int vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct gspca_dev *gspca_dev = video_drvdata(file); gspca_dev->usb_err = 0; return gspca_dev->sd_desc->get_register(gspca_dev, reg); } static int vidioc_s_register(struct file *file, void *priv, const struct v4l2_dbg_register *reg) { struct gspca_dev *gspca_dev = video_drvdata(file); gspca_dev->usb_err = 0; return gspca_dev->sd_desc->set_register(gspca_dev, reg); } #endif static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmtdesc) { struct gspca_dev *gspca_dev = video_drvdata(file); int i, j, index; __u32 fmt_tb[8]; /* give an index to each format */ index = 0; for (i = gspca_dev->cam.nmodes; --i >= 0; ) { fmt_tb[index] = gspca_dev->cam.cam_mode[i].pixelformat; j = 0; for (;;) { if (fmt_tb[j] == fmt_tb[index]) break; j++; } if (j == index) { if (fmtdesc->index == index) break; /* new format */ index++; if (index >= ARRAY_SIZE(fmt_tb)) return -EINVAL; } } if (i < 0) return -EINVAL; /* no more format */ fmtdesc->pixelformat = fmt_tb[index]; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *_priv, struct v4l2_format *fmt) { struct gspca_dev *gspca_dev = video_drvdata(file); u32 priv = fmt->fmt.pix.priv; fmt->fmt.pix = gspca_dev->pixfmt; /* some drivers use priv internally, so keep the original value */ fmt->fmt.pix.priv = priv; return 0; } static int try_fmt_vid_cap(struct gspca_dev *gspca_dev, struct v4l2_format *fmt) { int w, h, mode, mode2; w = fmt->fmt.pix.width; h = fmt->fmt.pix.height; PDEBUG_MODE(gspca_dev, D_CONF, "try fmt cap", fmt->fmt.pix.pixelformat, w, h); /* search the nearest mode for width and height */ mode = wxh_to_nearest_mode(gspca_dev, w, h, fmt->fmt.pix.pixelformat); /* OK if right palette */ if (gspca_dev->cam.cam_mode[mode].pixelformat != fmt->fmt.pix.pixelformat) { /* else, search the closest mode with the same pixel format */ mode2 = gspca_get_mode(gspca_dev, mode, fmt->fmt.pix.pixelformat); if (mode2 >= 0) mode = mode2; } fmt->fmt.pix = gspca_dev->cam.cam_mode[mode]; if (gspca_dev->sd_desc->try_fmt) { /* pass original resolution to subdriver try_fmt */ fmt->fmt.pix.width = w; fmt->fmt.pix.height = h; gspca_dev->sd_desc->try_fmt(gspca_dev, fmt); } return mode; /* used when s_fmt */ } static int vidioc_try_fmt_vid_cap(struct file *file, void *_priv, struct v4l2_format *fmt) { struct gspca_dev *gspca_dev = video_drvdata(file); u32 priv = fmt->fmt.pix.priv; if (try_fmt_vid_cap(gspca_dev, fmt) < 0) return -EINVAL; /* some drivers use priv 
internally, so keep the original value */ fmt->fmt.pix.priv = priv; return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *_priv, struct v4l2_format *fmt) { struct gspca_dev *gspca_dev = video_drvdata(file); u32 priv = fmt->fmt.pix.priv; int mode; if (vb2_is_busy(&gspca_dev->queue)) return -EBUSY; mode = try_fmt_vid_cap(gspca_dev, fmt); if (mode < 0) return -EINVAL; gspca_dev->curr_mode = mode; if (gspca_dev->sd_desc->try_fmt) /* subdriver try_fmt can modify format parameters */ gspca_dev->pixfmt = fmt->fmt.pix; else gspca_dev->pixfmt = gspca_dev->cam.cam_mode[mode]; /* some drivers use priv internally, so keep the original value */ fmt->fmt.pix.priv = priv; return 0; } static int vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { struct gspca_dev *gspca_dev = video_drvdata(file); int i; __u32 index = 0; if (gspca_dev->sd_desc->enum_framesizes) return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize); for (i = 0; i < gspca_dev->cam.nmodes; i++) { if (fsize->pixel_format != gspca_dev->cam.cam_mode[i].pixelformat) continue; if (fsize->index == index) { fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = gspca_dev->cam.cam_mode[i].width; fsize->discrete.height = gspca_dev->cam.cam_mode[i].height; return 0; } index++; } return -EINVAL; } static int vidioc_enum_frameintervals(struct file *filp, void *priv, struct v4l2_frmivalenum *fival) { struct gspca_dev *gspca_dev = video_drvdata(filp); int mode; __u32 i; mode = wxh_to_mode(gspca_dev, fival->width, fival->height, fival->pixel_format); if (mode < 0) return -EINVAL; if (gspca_dev->cam.mode_framerates == NULL || gspca_dev->cam.mode_framerates[mode].nrates == 0) return -EINVAL; if (fival->pixel_format != gspca_dev->cam.cam_mode[mode].pixelformat) return -EINVAL; for (i = 0; i < gspca_dev->cam.mode_framerates[mode].nrates; i++) { if (fival->index == i) { fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete.numerator = 1; fival->discrete.denominator = gspca_dev->cam.mode_framerates[mode].rates[i]; return 0; } } return -EINVAL; } static void gspca_release(struct v4l2_device *v4l2_device) { struct gspca_dev *gspca_dev = container_of(v4l2_device, struct gspca_dev, v4l2_dev); v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler); v4l2_device_unregister(&gspca_dev->v4l2_dev); kfree(gspca_dev->usb_buf); kfree(gspca_dev); } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct gspca_dev *gspca_dev = video_drvdata(file); strscpy((char *)cap->driver, gspca_dev->sd_desc->name, sizeof(cap->driver)); if (gspca_dev->dev->product != NULL) { strscpy((char *)cap->card, gspca_dev->dev->product, sizeof(cap->card)); } else { snprintf((char *) cap->card, sizeof cap->card, "USB Camera (%04x:%04x)", le16_to_cpu(gspca_dev->dev->descriptor.idVendor), le16_to_cpu(gspca_dev->dev->descriptor.idProduct)); } usb_make_path(gspca_dev->dev, (char *) cap->bus_info, sizeof(cap->bus_info)); return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *input) { struct gspca_dev *gspca_dev = video_drvdata(file); if (input->index != 0) return -EINVAL; input->type = V4L2_INPUT_TYPE_CAMERA; input->status = gspca_dev->cam.input_flags; strscpy(input->name, gspca_dev->sd_desc->name, sizeof input->name); return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { if (i > 0) return -EINVAL; return 0; } static int 
vidioc_g_jpegcomp(struct file *file, void *priv, struct v4l2_jpegcompression *jpegcomp) { struct gspca_dev *gspca_dev = video_drvdata(file); gspca_dev->usb_err = 0; return gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp); } static int vidioc_s_jpegcomp(struct file *file, void *priv, const struct v4l2_jpegcompression *jpegcomp) { struct gspca_dev *gspca_dev = video_drvdata(file); gspca_dev->usb_err = 0; return gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp); } static int vidioc_g_parm(struct file *filp, void *priv, struct v4l2_streamparm *parm) { struct gspca_dev *gspca_dev = video_drvdata(filp); parm->parm.capture.readbuffers = gspca_dev->queue.min_queued_buffers; if (!gspca_dev->sd_desc->get_streamparm) return 0; parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; gspca_dev->usb_err = 0; gspca_dev->sd_desc->get_streamparm(gspca_dev, parm); return gspca_dev->usb_err; } static int vidioc_s_parm(struct file *filp, void *priv, struct v4l2_streamparm *parm) { struct gspca_dev *gspca_dev = video_drvdata(filp); parm->parm.capture.readbuffers = gspca_dev->queue.min_queued_buffers; if (!gspca_dev->sd_desc->set_streamparm) { parm->parm.capture.capability = 0; return 0; } parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; gspca_dev->usb_err = 0; gspca_dev->sd_desc->set_streamparm(gspca_dev, parm); return gspca_dev->usb_err; } static int gspca_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); if (*nplanes) return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; sizes[0] = size; return 0; } static int gspca_buffer_prepare(struct vb2_buffer *vb) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); if (vb2_plane_size(vb, 0) < size) { gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -EINVAL; } return 0; } static void gspca_buffer_finish(struct vb2_buffer *vb) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); if (!gspca_dev->sd_desc->dq_callback) return; gspca_dev->usb_err = 0; if (gspca_dev->present) gspca_dev->sd_desc->dq_callback(gspca_dev); } static void gspca_buffer_queue(struct vb2_buffer *vb) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); struct gspca_buffer *buf = to_gspca_buffer(vb); unsigned long flags; spin_lock_irqsave(&gspca_dev->qlock, flags); list_add_tail(&buf->list, &gspca_dev->buf_list); spin_unlock_irqrestore(&gspca_dev->qlock, flags); } static void gspca_return_all_buffers(struct gspca_dev *gspca_dev, enum vb2_buffer_state state) { struct gspca_buffer *buf, *node; unsigned long flags; spin_lock_irqsave(&gspca_dev->qlock, flags); list_for_each_entry_safe(buf, node, &gspca_dev->buf_list, list) { vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); } spin_unlock_irqrestore(&gspca_dev->qlock, flags); } static int gspca_start_streaming(struct vb2_queue *vq, unsigned int count) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); int ret; gspca_dev->sequence = 0; ret = gspca_init_transfer(gspca_dev); if (ret) gspca_return_all_buffers(gspca_dev, VB2_BUF_STATE_QUEUED); return ret; } static void gspca_stop_streaming(struct vb2_queue *vq) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); gspca_stream_off(gspca_dev); /* Release all active buffers */ gspca_return_all_buffers(gspca_dev, VB2_BUF_STATE_ERROR); } static const struct 
vb2_ops gspca_qops = { .queue_setup = gspca_queue_setup, .buf_prepare = gspca_buffer_prepare, .buf_finish = gspca_buffer_finish, .buf_queue = gspca_buffer_queue, .start_streaming = gspca_start_streaming, .stop_streaming = gspca_stop_streaming, }; static const struct v4l2_file_operations dev_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .unlocked_ioctl = video_ioctl2, .read = vb2_fop_read, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; static const struct v4l2_ioctl_ops dev_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_g_jpegcomp = vidioc_g_jpegcomp, .vidioc_s_jpegcomp = vidioc_s_jpegcomp, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_enum_frameintervals = vidioc_enum_frameintervals, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_chip_info = vidioc_g_chip_info, .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static const struct video_device gspca_template = { .name = "gspca main driver", .fops = &dev_fops, .ioctl_ops = &dev_ioctl_ops, .release = video_device_release_empty, /* We use v4l2_dev.release */ }; /* * probe and create a new gspca device * * This function must be called by the sub-driver when it is * called for probing a new device. 
*/ int gspca_dev_probe2(struct usb_interface *intf, const struct usb_device_id *id, const struct sd_desc *sd_desc, int dev_size, struct module *module) { struct gspca_dev *gspca_dev; struct usb_device *dev = interface_to_usbdev(intf); struct vb2_queue *q; int ret; pr_info("%s-" GSPCA_VERSION " probing %04x:%04x\n", sd_desc->name, id->idVendor, id->idProduct); /* create the device */ if (dev_size < sizeof *gspca_dev) dev_size = sizeof *gspca_dev; gspca_dev = kzalloc(dev_size, GFP_KERNEL); if (!gspca_dev) { pr_err("couldn't kzalloc gspca struct\n"); return -ENOMEM; } gspca_dev->usb_buf = kzalloc(USB_BUF_SZ, GFP_KERNEL); if (!gspca_dev->usb_buf) { pr_err("out of memory\n"); ret = -ENOMEM; goto out; } gspca_dev->dev = dev; gspca_dev->iface = intf->cur_altsetting->desc.bInterfaceNumber; gspca_dev->xfer_ep = -1; /* check if any audio device */ if (dev->actconfig->desc.bNumInterfaces != 1) { int i; struct usb_interface *intf2; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { intf2 = dev->actconfig->interface[i]; if (intf2 != NULL && intf2->altsetting != NULL && intf2->altsetting->desc.bInterfaceClass == USB_CLASS_AUDIO) { gspca_dev->audio = 1; break; } } } gspca_dev->v4l2_dev.release = gspca_release; ret = v4l2_device_register(&intf->dev, &gspca_dev->v4l2_dev); if (ret) goto out; gspca_dev->present = true; gspca_dev->sd_desc = sd_desc; gspca_dev->empty_packet = -1; /* don't check the empty packets */ gspca_dev->vdev = gspca_template; gspca_dev->vdev.v4l2_dev = &gspca_dev->v4l2_dev; gspca_dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; video_set_drvdata(&gspca_dev->vdev, gspca_dev); gspca_dev->module = module; mutex_init(&gspca_dev->usb_lock); gspca_dev->vdev.lock = &gspca_dev->usb_lock; init_waitqueue_head(&gspca_dev->wq); /* Initialize the vb2 queue */ q = &gspca_dev->queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; q->drv_priv = gspca_dev; q->buf_struct_size = sizeof(struct gspca_buffer); q->ops = &gspca_qops; q->mem_ops = &vb2_vmalloc_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_queued_buffers = 2; q->lock = &gspca_dev->usb_lock; ret = vb2_queue_init(q); if (ret) goto out; gspca_dev->vdev.queue = q; INIT_LIST_HEAD(&gspca_dev->buf_list); spin_lock_init(&gspca_dev->qlock); /* configure the subdriver and initialize the USB device */ ret = sd_desc->config(gspca_dev, id); if (ret < 0) goto out; ret = sd_desc->init(gspca_dev); if (ret < 0) goto out; if (sd_desc->init_controls) ret = sd_desc->init_controls(gspca_dev); if (ret < 0) goto out; gspca_set_default_mode(gspca_dev); ret = gspca_input_connect(gspca_dev); if (ret) goto out; #ifdef CONFIG_VIDEO_ADV_DEBUG if (!gspca_dev->sd_desc->get_register) v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_DBG_G_REGISTER); if (!gspca_dev->sd_desc->set_register) v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_DBG_S_REGISTER); #endif if (!gspca_dev->sd_desc->get_jcomp) v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_G_JPEGCOMP); if (!gspca_dev->sd_desc->set_jcomp) v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_S_JPEGCOMP); /* init video stuff */ ret = video_register_device(&gspca_dev->vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { pr_err("video_register_device err %d\n", ret); goto out; } usb_set_intfdata(intf, gspca_dev); gspca_dbg(gspca_dev, D_PROBE, "%s created\n", video_device_node_name(&gspca_dev->vdev)); gspca_input_create_urb(gspca_dev); return 0; out: #if IS_ENABLED(CONFIG_INPUT) if (gspca_dev->input_dev) 
input_unregister_device(gspca_dev->input_dev); #endif v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler); v4l2_device_unregister(&gspca_dev->v4l2_dev); if (sd_desc->probe_error) sd_desc->probe_error(gspca_dev); kfree(gspca_dev->usb_buf); kfree(gspca_dev); return ret; } EXPORT_SYMBOL(gspca_dev_probe2); /* same function as the previous one, but check the interface */ int gspca_dev_probe(struct usb_interface *intf, const struct usb_device_id *id, const struct sd_desc *sd_desc, int dev_size, struct module *module) { struct usb_device *dev = interface_to_usbdev(intf); /* we don't handle multi-config cameras */ if (dev->descriptor.bNumConfigurations != 1) { pr_err("%04x:%04x too many config\n", id->idVendor, id->idProduct); return -ENODEV; } /* the USB video interface must be the first one */ if (dev->actconfig->desc.bNumInterfaces != 1 && intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; return gspca_dev_probe2(intf, id, sd_desc, dev_size, module); } EXPORT_SYMBOL(gspca_dev_probe); /* * USB disconnection * * This function must be called by the sub-driver * when the device disconnects, after the specific resources are freed. */ void gspca_disconnect(struct usb_interface *intf) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); #if IS_ENABLED(CONFIG_INPUT) struct input_dev *input_dev; #endif gspca_dbg(gspca_dev, D_PROBE, "%s disconnect\n", video_device_node_name(&gspca_dev->vdev)); mutex_lock(&gspca_dev->usb_lock); gspca_dev->present = false; destroy_urbs(gspca_dev); gspca_input_destroy_urb(gspca_dev); vb2_queue_error(&gspca_dev->queue); #if IS_ENABLED(CONFIG_INPUT) input_dev = gspca_dev->input_dev; if (input_dev) { gspca_dev->input_dev = NULL; input_unregister_device(input_dev); } #endif v4l2_device_disconnect(&gspca_dev->v4l2_dev); video_unregister_device(&gspca_dev->vdev); mutex_unlock(&gspca_dev->usb_lock); /* (this will call gspca_release() immediately or on last close) */ v4l2_device_put(&gspca_dev->v4l2_dev); } EXPORT_SYMBOL(gspca_disconnect); #ifdef CONFIG_PM int gspca_suspend(struct usb_interface *intf, pm_message_t message) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); gspca_input_destroy_urb(gspca_dev); if (!vb2_start_streaming_called(&gspca_dev->queue)) return 0; mutex_lock(&gspca_dev->usb_lock); gspca_dev->frozen = 1; /* avoid urb error messages */ gspca_dev->usb_err = 0; if (gspca_dev->sd_desc->stopN) gspca_dev->sd_desc->stopN(gspca_dev); destroy_urbs(gspca_dev); gspca_set_alt0(gspca_dev); if (gspca_dev->sd_desc->stop0) gspca_dev->sd_desc->stop0(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); return 0; } EXPORT_SYMBOL(gspca_suspend); int gspca_resume(struct usb_interface *intf) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); int streaming, ret = 0; mutex_lock(&gspca_dev->usb_lock); gspca_dev->frozen = 0; gspca_dev->usb_err = 0; gspca_dev->sd_desc->init(gspca_dev); /* * Most subdrivers send all ctrl values on sd_start and thus * only write to the device registers on s_ctrl when streaming -> * Clear streaming to avoid setting all ctrls twice. 
*/ streaming = vb2_start_streaming_called(&gspca_dev->queue); if (streaming) ret = gspca_init_transfer(gspca_dev); else gspca_input_create_urb(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); return ret; } EXPORT_SYMBOL(gspca_resume); #endif /* -- module insert / remove -- */ static int __init gspca_init(void) { pr_info("v" GSPCA_VERSION " registered\n"); return 0; } static void __exit gspca_exit(void) { } module_init(gspca_init); module_exit(gspca_exit); module_param_named(debug, gspca_debug, int, 0644); MODULE_PARM_DESC(debug, "1:probe 2:config 3:stream 4:frame 5:packet 6:usbi 7:usbo");
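/*
 * Editor's illustrative sketch (not part of the original gspca.c above):
 * a minimal, hypothetical gspca sub-driver showing how the probe path
 * above is entered. The vendor/product ID, the "example" name and the
 * sd_config/sd_init bodies are placeholders; a real sub-driver would also
 * implement streaming callbacks such as start and pkt_scan.
 */
#include <linux/module.h>
#include "gspca.h"

static const struct v4l2_pix_format sd_modes[] = {
	{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 640,
	 .sizeimage = 640 * 480 * 3 / 8,
	 .colorspace = V4L2_COLORSPACE_JPEG},
};

/* called via sd_desc->config from gspca_dev_probe2() */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	gspca_dev->cam.cam_mode = sd_modes;
	gspca_dev->cam.nmodes = ARRAY_SIZE(sd_modes);
	return 0;
}

/* called via sd_desc->init from gspca_dev_probe2() */
static int sd_init(struct gspca_dev *gspca_dev)
{
	return 0;
}

static const struct sd_desc sd_desc = {
	.name = "example",
	.config = sd_config,
	.init = sd_init,
};

static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0000, 0x0000)},	/* hypothetical VID:PID */
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	/* a real sub-driver passes the size of its own struct that embeds
	 * struct gspca_dev as its first member */
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct gspca_dev),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = "example",
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
MODULE_LICENSE("GPL");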
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SCSI_SCSI_DEVICE_H #define _SCSI_SCSI_DEVICE_H #include <linux/list.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/blk-mq.h> #include <scsi/scsi.h> #include <linux/atomic.h> #include <linux/sbitmap.h> struct bsg_device; struct device; struct request_queue; struct scsi_cmnd; struct scsi_lun; struct scsi_sense_hdr; typedef __u64 __bitwise blist_flags_t; #define SCSI_SENSE_BUFFERSIZE 96 struct scsi_mode_data { __u32 length; __u16 block_descriptor_length; __u8 medium_type; __u8 device_specific; __u8 header_length; __u8 longlba:1; }; /* * sdev state: If you alter this, you also need to alter scsi_sysfs.c * (for the ascii descriptions) and the state model enforcer: * scsi_lib:scsi_device_set_state().
*/ enum scsi_device_state { SDEV_CREATED = 1, /* device created but not added to sysfs * Only internal commands allowed (for inq) */ SDEV_RUNNING, /* device properly configured * All commands allowed */ SDEV_CANCEL, /* beginning to delete device * Only error handler commands allowed */ SDEV_DEL, /* device deleted * no commands allowed */ SDEV_QUIESCE, /* Device quiescent. No block commands * will be accepted, only specials (which * originate in the mid-layer) */ SDEV_OFFLINE, /* Device offlined (by error handling or * user request */ SDEV_TRANSPORT_OFFLINE, /* Offlined by transport class error handler */ SDEV_BLOCK, /* Device blocked by scsi lld. No * scsi commands from user or midlayer * should be issued to the scsi * lld. */ SDEV_CREATED_BLOCK, /* same as above but for created devices */ }; enum scsi_scan_mode { SCSI_SCAN_INITIAL = 0, SCSI_SCAN_RESCAN, SCSI_SCAN_MANUAL, }; enum scsi_device_event { SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ SDEV_EVT_CAPACITY_CHANGE_REPORTED, /* 2A 09 UA reported */ SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */ SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */ SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */ SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */ SDEV_EVT_POWER_ON_RESET_OCCURRED, /* 29 00 UA reported */ SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE, SDEV_EVT_LAST = SDEV_EVT_POWER_ON_RESET_OCCURRED, SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1 }; struct scsi_event { enum scsi_device_event evt_type; struct list_head node; /* put union of data structures, for non-simple event types, * here */ }; /** * struct scsi_vpd - SCSI Vital Product Data * @rcu: For kfree_rcu(). * @len: Length in bytes of @data. * @data: VPD data as defined in various T10 SCSI standard documents. */ struct scsi_vpd { struct rcu_head rcu; int len; unsigned char data[]; }; struct scsi_device { struct Scsi_Host *host; struct request_queue *request_queue; /* the next two are protected by the host->host_lock */ struct list_head siblings; /* list of all devices on this host */ struct list_head same_target_siblings; /* just the devices sharing same target id */ struct sbitmap budget_map; atomic_t device_blocked; /* Device returned QUEUE_FULL. */ atomic_t restarts; spinlock_t list_lock; struct list_head starved_entry; unsigned short queue_depth; /* How deep of a queue we want */ unsigned short max_queue_depth; /* max queue depth */ unsigned short last_queue_full_depth; /* These two are used by */ unsigned short last_queue_full_count; /* scsi_track_queue_full() */ unsigned long last_queue_full_time; /* last queue full time */ unsigned long queue_ramp_up_period; /* ramp up period in jiffies */ #define SCSI_DEFAULT_RAMP_UP_PERIOD (120 * HZ) unsigned long last_queue_ramp_up; /* last queue ramp up time */ unsigned int id, channel; u64 lun; unsigned int manufacturer; /* Manufacturer of device, for using * vendor-specific cmd's */ unsigned sector_size; /* size in bytes */ void *hostdata; /* available to low-level driver */ unsigned char type; char scsi_level; char inq_periph_qual; /* PQ from INQUIRY data */ struct mutex inquiry_mutex; unsigned char inquiry_len; /* valid bytes in 'inquiry' */ unsigned char * inquiry; /* INQUIRY response data */ const char * vendor; /* [back_compat] point into 'inquiry' ... */ const char * model; /* ... after scan; point to static string */ const char * rev; /* ... 
"nullnullnullnull" before scan */ #define SCSI_DEFAULT_VPD_LEN 255 /* default SCSI VPD page size (max) */ struct scsi_vpd __rcu *vpd_pg0; struct scsi_vpd __rcu *vpd_pg83; struct scsi_vpd __rcu *vpd_pg80; struct scsi_vpd __rcu *vpd_pg89; struct scsi_vpd __rcu *vpd_pgb0; struct scsi_vpd __rcu *vpd_pgb1; struct scsi_vpd __rcu *vpd_pgb2; struct scsi_vpd __rcu *vpd_pgb7; struct scsi_target *sdev_target; blist_flags_t sdev_bflags; /* black/white flags as also found in * scsi_devinfo.[hc]. For now used only to * pass settings from sdev_init to scsi * core. */ unsigned int eh_timeout; /* Error handling timeout */ /* * If true, let the high-level device driver (sd) manage the device * power state for system suspend/resume (suspend to RAM and * hibernation) operations. */ unsigned manage_system_start_stop:1; /* * If true, let the high-level device driver (sd) manage the device * power state for runtime device suspand and resume operations. */ unsigned manage_runtime_start_stop:1; /* * If true, let the high-level device driver (sd) manage the device * power state for system shutdown (power off) operations. */ unsigned manage_shutdown:1; /* * If set and if the device is runtime suspended, ask the high-level * device driver (sd) to force a runtime resume of the device. */ unsigned force_runtime_start_on_system_start:1; unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ unsigned busy:1; /* Used to prevent races */ unsigned lockable:1; /* Able to prevent media removal */ unsigned locked:1; /* Media removal disabled */ unsigned borken:1; /* Tell the Seagate driver to be * painfully slow on this device */ unsigned disconnect:1; /* can disconnect */ unsigned soft_reset:1; /* Uses soft reset option */ unsigned sdtr:1; /* Device supports SDTR messages */ unsigned wdtr:1; /* Device supports WDTR messages */ unsigned ppr:1; /* Device supports PPR messages */ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ unsigned simple_tags:1; /* simple queue tag messages are enabled */ unsigned was_reset:1; /* There was a bus reset on the bus for * this device */ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN * because we did a bus reset. */ unsigned use_10_for_rw:1; /* first try 10-byte read / write */ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */ unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ unsigned no_write_same:1; /* no WRITE SAME command */ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ unsigned use_16_for_sync:1; /* Use sync (16) over sync (10) */ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ unsigned skip_vpd_pages:1; /* do not read VPD pages */ unsigned try_vpd_pages:1; /* attempt to read VPD pages */ unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ unsigned start_stop_pwr_cond:1; /* Set power cond. 
in START_STOP_UNIT */ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ unsigned select_no_atn:1; unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */ unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */ unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ unsigned last_sector_bug:1; /* do not use multisector accesses on SD_LAST_BUGGY_SECTORS */ unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */ unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */ unsigned try_rc_10_first:1; /* Try READ_CAPACACITY_10 first */ unsigned security_supported:1; /* Supports Security Protocols */ unsigned is_visible:1; /* is the device visible in sysfs */ unsigned wce_default_on:1; /* Cache is ON by default */ unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ unsigned broken_fua:1; /* Don't set FUA bit */ unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device * creation time */ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */ unsigned silence_suspend:1; /* Do not print runtime PM related messages */ unsigned no_vpd_size:1; /* No VPD size reported in header */ unsigned cdl_supported:1; /* Command duration limits supported */ unsigned cdl_enable:1; /* Enable/disable Command duration limits */ unsigned int queue_stopped; /* request queue is quiesced */ bool offline_already; /* Device offline message logged */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */ struct list_head event_list; /* asserted events */ struct work_struct event_work; unsigned int max_device_blocked; /* what device_blocked counts down from */ #define SCSI_DEFAULT_DEVICE_BLOCKED 3 atomic_t iorequest_cnt; atomic_t iodone_cnt; atomic_t ioerr_cnt; atomic_t iotmo_cnt; struct device sdev_gendev, sdev_dev; struct work_struct requeue_work; struct scsi_device_handler *handler; void *handler_data; size_t dma_drain_len; void *dma_drain_buf; unsigned int sg_timeout; unsigned int sg_reserved_size; struct bsg_device *bsg_dev; unsigned char access_state; struct mutex state_mutex; enum scsi_device_state sdev_state; struct task_struct *quiesced_by; unsigned long sdev_data[]; } __attribute__((aligned(sizeof(unsigned long)))); #define to_scsi_device(d) \ container_of(d, struct scsi_device, sdev_gendev) #define class_to_sdev(d) \ container_of(d, struct scsi_device, sdev_dev) #define transport_class_to_sdev(class_dev) \ to_scsi_device(class_dev->parent) #define sdev_dbg(sdev, fmt, a...) \ dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) /* * like scmd_printk, but the device name is passed in * as a string pointer */ __printf(4, 5) void sdev_prefix_printk(const char *, const struct scsi_device *, const char *, const char *, ...); #define sdev_printk(l, sdev, fmt, a...) \ sdev_prefix_printk(l, sdev, NULL, fmt, ##a) __printf(3, 4) void scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); #define scmd_dbg(scmd, fmt, a...) 
\ do { \ struct request *__rq = scsi_cmd_to_rq((scmd)); \ \ if (__rq->q->disk) \ sdev_dbg((scmd)->device, "[%s] " fmt, \ __rq->q->disk->disk_name, ##a); \ else \ sdev_dbg((scmd)->device, fmt, ##a); \ } while (0) enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING, STARGET_REMOVE, STARGET_CREATED_REMOVE, STARGET_DEL, }; /* * scsi_target: representation of a scsi target, for now, this is only * used for single_lun devices. If no one has active IO to the target, * starget_sdev_user is NULL, else it points to the active sdev. */ struct scsi_target { struct scsi_device *starget_sdev_user; struct list_head siblings; struct list_head devices; struct device dev; struct kref reap_ref; /* last put renders target invisible */ unsigned int channel; unsigned int id; /* target id ... replace * scsi_device.id eventually */ unsigned int create:1; /* signal that it needs to be added */ unsigned int single_lun:1; /* Indicates we should only * allow I/O to one of the luns * for the device at a time. */ unsigned int pdt_1f_for_no_lun:1; /* PDT = 0x1f * means no lun present. */ unsigned int no_report_luns:1; /* Don't use * REPORT LUNS for scanning. */ unsigned int expecting_lun_change:1; /* A device has reported * a 3F/0E UA, other devices on * the same target will also. */ /* commands actually active on LLD. */ atomic_t target_busy; atomic_t target_blocked; /* * LLDs should set this in the sdev_init host template callout. * If set to zero then there is not limit. */ unsigned int can_queue; unsigned int max_target_blocked; #define SCSI_DEFAULT_TARGET_BLOCKED 3 char scsi_level; enum scsi_target_state state; void *hostdata; /* available to low-level driver */ unsigned long starget_data[]; /* for the transport */ /* starget_data must be the last element!!!! */ } __attribute__((aligned(sizeof(unsigned long)))); #define to_scsi_target(d) container_of(d, struct scsi_target, dev) static inline struct scsi_target *scsi_target(struct scsi_device *sdev) { return to_scsi_target(sdev->sdev_gendev.parent); } #define transport_class_to_starget(class_dev) \ to_scsi_target(class_dev->parent) #define starget_printk(prefix, starget, fmt, a...) 
\ dev_printk(prefix, &(starget)->dev, fmt, ##a) extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, uint, uint, u64, void *hostdata); extern int scsi_add_device(struct Scsi_Host *host, uint channel, uint target, u64 lun); extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh); extern void scsi_remove_device(struct scsi_device *); extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); void scsi_attach_vpd(struct scsi_device *sdev); void scsi_cdl_check(struct scsi_device *sdev); int scsi_cdl_enable(struct scsi_device *sdev, bool enable); extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); extern int __must_check scsi_device_get(struct scsi_device *); extern void scsi_device_put(struct scsi_device *); extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, uint, uint, u64); extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *, uint, uint, u64); extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *, u64); extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *, u64); extern void starget_for_each_device(struct scsi_target *, void *, void (*fn)(struct scsi_device *, void *)); extern void __starget_for_each_device(struct scsi_target *, void *, void (*fn)(struct scsi_device *, void *)); /* only exposed to implement shost_for_each_device */ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, struct scsi_device *); /** * shost_for_each_device - iterate over all devices of a host * @sdev: the &struct scsi_device to use as a cursor * @shost: the &struct scsi_host to iterate over * * Iterator that returns each device attached to @shost. This loop * takes a reference on each device and releases it at the end. If * you break out of the loop, you must call scsi_device_put(sdev). */ #define shost_for_each_device(sdev, shost) \ for ((sdev) = __scsi_iterate_devices((shost), NULL); \ (sdev); \ (sdev) = __scsi_iterate_devices((shost), (sdev))) /** * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) * @sdev: the &struct scsi_device to use as a cursor * @shost: the &struct scsi_host to iterate over * * Iterator that returns each device attached to @shost. It does _not_ * take a reference on the scsi_device, so the whole loop must be * protected by shost->host_lock. * * Note: The only reason to use this is because you need to access the * device list in interrupt context. Otherwise you really want to use * shost_for_each_device instead. 
*/ #define __shost_for_each_device(sdev, shost) \ list_for_each_entry((sdev), &((shost)->__devices), siblings) extern int scsi_change_queue_depth(struct scsi_device *, int); extern int scsi_track_queue_full(struct scsi_device *, int); extern int scsi_set_medium_removal(struct scsi_device *, char); int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *); extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *); extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, struct scsi_sense_hdr *sshdr); extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, int buf_len); int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, unsigned int len, unsigned char opcode, unsigned short sa); extern int scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state); extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, gfp_t gfpflags); extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt); extern void sdev_evt_send_simple(struct scsi_device *sdev, enum scsi_device_event evt_type, gfp_t gfpflags); extern int scsi_device_quiesce(struct scsi_device *sdev); extern void scsi_device_resume(struct scsi_device *sdev); extern void scsi_target_quiesce(struct scsi_target *); extern void scsi_target_resume(struct scsi_target *); extern void scsi_scan_target(struct device *parent, unsigned int channel, unsigned int id, u64 lun, enum scsi_scan_mode rescan); extern void scsi_target_reap(struct scsi_target *); void scsi_block_targets(struct Scsi_Host *shost, struct device *dev); extern void scsi_target_unblock(struct device *, enum scsi_device_state); extern void scsi_remove_target(struct device *); extern const char *scsi_device_state_name(enum scsi_device_state); extern int scsi_is_sdev_device(const struct device *); extern int scsi_is_target_device(const struct device *); extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); /* * scsi_execute_cmd users can set scsi_failure.result to have * scsi_check_passthrough fail/retry a command. scsi_failure.result can be a * specific host byte or message code, or SCMD_FAILURE_RESULT_ANY can be used * to match any host or message code. */ #define SCMD_FAILURE_RESULT_ANY 0x7fffffff /* * Set scsi_failure.result to SCMD_FAILURE_STAT_ANY to fail/retry any failure * scsi_status_is_good returns false for. */ #define SCMD_FAILURE_STAT_ANY 0xff /* * The following can be set to the scsi_failure sense, asc and ascq fields to * match on any sense, ASC, or ASCQ value. */ #define SCMD_FAILURE_SENSE_ANY 0xff #define SCMD_FAILURE_ASC_ANY 0xff #define SCMD_FAILURE_ASCQ_ANY 0xff /* Always retry a matching failure. */ #define SCMD_FAILURE_NO_LIMIT -1 struct scsi_failure { int result; u8 sense; u8 asc; u8 ascq; /* * Number of times scsi_execute_cmd will retry the failure. It does * not count for the total_allowed. */ s8 allowed; /* Number of times the failure has been retried. */ s8 retries; }; struct scsi_failures { /* * If a scsi_failure does not have a retry limit setup this limit will * be used. 
*/ int total_allowed; int total_retries; struct scsi_failure *failure_definitions; }; /* Optional arguments to scsi_execute_cmd */ struct scsi_exec_args { unsigned char *sense; /* sense buffer */ unsigned int sense_len; /* sense buffer len */ struct scsi_sense_hdr *sshdr; /* decoded sense header */ blk_mq_req_flags_t req_flags; /* BLK_MQ_REQ flags */ int scmd_flags; /* SCMD flags */ int *resid; /* residual length */ struct scsi_failures *failures; /* failures to retry */ }; int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, blk_opf_t opf, void *buffer, unsigned int bufflen, int timeout, int retries, const struct scsi_exec_args *args); void scsi_failures_reset_retries(struct scsi_failures *failures); extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t); extern int scsi_vpd_tpg_id(struct scsi_device *, int *); #ifdef CONFIG_PM extern int scsi_autopm_get_device(struct scsi_device *); extern void scsi_autopm_put_device(struct scsi_device *); #else static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; } static inline void scsi_autopm_put_device(struct scsi_device *d) {} #endif /* CONFIG_PM */ static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) { return device_reprobe(&sdev->sdev_gendev); } static inline unsigned int sdev_channel(struct scsi_device *sdev) { return sdev->channel; } static inline unsigned int sdev_id(struct scsi_device *sdev) { return sdev->id; } #define scmd_id(scmd) sdev_id((scmd)->device) #define scmd_channel(scmd) sdev_channel((scmd)->device) /* * checks for positions of the SCSI state machine */ static inline int scsi_device_online(struct scsi_device *sdev) { return (sdev->sdev_state != SDEV_OFFLINE && sdev->sdev_state != SDEV_TRANSPORT_OFFLINE && sdev->sdev_state != SDEV_DEL); } static inline int scsi_device_blocked(struct scsi_device *sdev) { return sdev->sdev_state == SDEV_BLOCK || sdev->sdev_state == SDEV_CREATED_BLOCK; } static inline int scsi_device_created(struct scsi_device *sdev) { return sdev->sdev_state == SDEV_CREATED || sdev->sdev_state == SDEV_CREATED_BLOCK; } int scsi_internal_device_block_nowait(struct scsi_device *sdev); int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, enum scsi_device_state new_state); /* accessor functions for the SCSI parameters */ static inline int scsi_device_sync(struct scsi_device *sdev) { return sdev->sdtr; } static inline int scsi_device_wide(struct scsi_device *sdev) { return sdev->wdtr; } static inline int scsi_device_dt(struct scsi_device *sdev) { return sdev->ppr; } static inline int scsi_device_dt_only(struct scsi_device *sdev) { if (sdev->inquiry_len < 57) return 0; return (sdev->inquiry[56] & 0x0c) == 0x04; } static inline int scsi_device_ius(struct scsi_device *sdev) { if (sdev->inquiry_len < 57) return 0; return sdev->inquiry[56] & 0x01; } static inline int scsi_device_qas(struct scsi_device *sdev) { if (sdev->inquiry_len < 57) return 0; return sdev->inquiry[56] & 0x02; } static inline int scsi_device_enclosure(struct scsi_device *sdev) { return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; } static inline int scsi_device_protection(struct scsi_device *sdev) { if (sdev->no_dif) return 0; return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0); } static inline int scsi_device_tpgs(struct scsi_device *sdev) { return sdev->inquiry ? 
(sdev->inquiry[5] >> 4) & 0x3 : 0; } /** * scsi_device_supports_vpd - test if a device supports VPD pages * @sdev: the &struct scsi_device to test * * If the 'try_vpd_pages' flag is set it takes precedence. * Otherwise we will assume VPD pages are supported if the * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set. */ static inline int scsi_device_supports_vpd(struct scsi_device *sdev) { /* Attempt VPD inquiry if the device blacklist explicitly calls * for it. */ if (sdev->try_vpd_pages) return 1; /* * Although VPD inquiries can go to SCSI-2 type devices, * some USB ones crash on receiving them, and the pages * we currently ask for are mandatory for SPC-2 and beyond */ if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages) return 1; return 0; } static inline int scsi_device_busy(struct scsi_device *sdev) { return sbitmap_weight(&sdev->budget_map); } #define MODULE_ALIAS_SCSI_DEVICE(type) \ MODULE_ALIAS("scsi:t-" __stringify(type) "*") #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" #endif /* _SCSI_SCSI_DEVICE_H */
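/*
 * Editor's illustrative sketch (not part of the header above): walking the
 * devices of a host with shost_for_each_device(). The function name is
 * hypothetical; the point is that the iterator holds a reference on each
 * scsi_device, so breaking out of the loop early requires a matching
 * scsi_device_put(), as the kerneldoc above states.
 */
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static unsigned int count_online_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned int online = 0;

	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev))
			online++;
		/* a 'break' here would need scsi_device_put(sdev) first */
	}
	return online;
}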
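/*
 * Editor's illustrative sketch (not part of the header above): using the
 * scsi_failures mechanism described above to make scsi_execute_cmd()
 * retry a TEST UNIT READY that fails with a UNIT ATTENTION. The function
 * name is hypothetical; TEST_UNIT_READY, UNIT_ATTENTION and
 * SAM_STAT_CHECK_CONDITION come from other SCSI headers, not this one.
 */
#include <scsi/scsi_proto.h>
#include <scsi/scsi_device.h>

static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_failure failure_defs[] = {
		{
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.allowed = 3,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{}	/* terminating entry */
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};

	/* REQ_OP_DRV_IN: no data transfer; 10 second timeout, 5 retries */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, 10 * HZ,
				5, &exec_args);
}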
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> <http://rt2x00.serialmonkey.com> */ /* Module: rt73usb Abstract: rt73usb device specific routines. Supported chipsets: rt2571W & rt2671. */ #include <linux/crc-itu-t.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" #include "rt2x00usb.h" #include "rt73usb.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* * Register access. * All access to the CSR registers will go through the methods * rt2x00usb_register_read and rt2x00usb_register_write. * BBP and RF registers require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try at most REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. * The _lock versions must be used if you already hold the csr_mutex */ #define WAIT_FOR_BBP(__dev, __reg) \ rt2x00usb_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value); rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0); rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register.
* After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); WAIT_FOR_BBP(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value); /* * RF5225 and RF2527 contain 21 bits per RF register value, * all others contain 20 bits. */ rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 20 + (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))); rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR4, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static const struct rt2x00debug rt73usb_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = rt2x00usb_register_read, .write = rt2x00usb_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u32), .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt73usb_bbp_read, .write = rt73usb_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt73usb_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR13); return rt2x00_get_field32(reg, MAC_CSR13_VAL7); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt73usb_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int a_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ); unsigned int bg_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); if (led->type == LED_TYPE_RADIO) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_RADIO_STATUS, enabled); rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, 0, led->rt2x00dev->led_mcu_reg, REGISTER_TIMEOUT); } else if (led->type == LED_TYPE_ASSOC) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_LINK_BG_STATUS, bg_mode); rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_LINK_A_STATUS, a_mode); rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, 0, led->rt2x00dev->led_mcu_reg, REGISTER_TIMEOUT); } else if (led->type == LED_TYPE_QUALITY) { /* * The brightness is divided into 6 levels (0 - 5), * this means we need to convert the brightness * argument into the matching level 
within that range. */ rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, brightness / (LED_FULL / 6), led->rt2x00dev->led_mcu_reg, REGISTER_TIMEOUT); } } static int rt73usb_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u32 reg; reg = rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14); rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on); rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off); rt2x00usb_register_write(led->rt2x00dev, MAC_CSR14, reg); return 0; } static void rt73usb_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt73usb_brightness_set; led->led_dev.blink_set = rt73usb_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_key_entry key_entry; struct rt2x00_field32 field; u32 mask; u32 reg; if (crypto->cmd == SET_KEY) { /* * rt2x00lib can't determine the correct free * key_idx for shared keys. We have 1 register * with key valid bits. The goal is simple, read * the register, if that is full we have no slots * left. * Note that each BSS is allowed to have up to 4 * shared keys, so put a mask over the allowed * entries. */ mask = (0xf << crypto->bssidx); reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR0); reg &= mask; if (reg && reg == mask) return -ENOSPC; key->hw_key_idx += reg ? ffz(reg) : 0; /* * Upload key to hardware */ memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); reg = SHARED_KEY_ENTRY(key->hw_key_idx); rt2x00usb_register_multiwrite(rt2x00dev, reg, &key_entry, sizeof(key_entry)); /* * The cipher types are stored over 2 registers. * bssidx 0 and 1 keys are stored in SEC_CSR1 and * bssidx 1 and 2 keys are stored in SEC_CSR5. * Using the correct defines correctly will cause overhead, * so just calculate the correct offset. */ if (key->hw_key_idx < 8) { field.bit_offset = (3 * key->hw_key_idx); field.bit_mask = 0x7 << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR1); rt2x00_set_field32(&reg, field, crypto->cipher); rt2x00usb_register_write(rt2x00dev, SEC_CSR1, reg); } else { field.bit_offset = (3 * (key->hw_key_idx - 8)); field.bit_mask = 0x7 << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR5); rt2x00_set_field32(&reg, field, crypto->cipher); rt2x00usb_register_write(rt2x00dev, SEC_CSR5, reg); } /* * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } /* * SEC_CSR0 contains only single-bit fields to indicate * a particular key is valid. Because using the FIELD32() * defines directly will cause a lot of overhead we use * a calculation to determine the correct bit directly. 
*/ mask = 1 << key->hw_key_idx; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR0); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00usb_register_write(rt2x00dev, SEC_CSR0, reg); return 0; } static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_pairwise_ta_entry addr_entry; struct hw_key_entry key_entry; u32 mask; u32 reg; if (crypto->cmd == SET_KEY) { /* * rt2x00lib can't determine the correct free * key_idx for pairwise keys. We have 2 registers * with key valid bits. The goal is simple, read * the first register, if that is full move to * the next register. * When both registers are full, we drop the key, * otherwise we use the first invalid entry. */ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR2); if (reg && reg == ~0) { key->hw_key_idx = 32; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR3); if (reg && reg == ~0) return -ENOSPC; } key->hw_key_idx += reg ? ffz(reg) : 0; /* * Upload key to hardware */ memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); rt2x00usb_register_multiwrite(rt2x00dev, reg, &key_entry, sizeof(key_entry)); /* * Send the address and cipher type to the hardware register. */ memset(&addr_entry, 0, sizeof(addr_entry)); memcpy(&addr_entry, crypto->address, ETH_ALEN); addr_entry.cipher = crypto->cipher; reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); rt2x00usb_register_multiwrite(rt2x00dev, reg, &addr_entry, sizeof(addr_entry)); /* * Enable pairwise lookup table for given BSS idx, * without this received frames will not be decrypted * by the hardware. */ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR4); reg |= (1 << crypto->bssidx); rt2x00usb_register_write(rt2x00dev, SEC_CSR4, reg); /* * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } /* * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate * a particular key is valid. Because using the FIELD32() * defines directly will cause a lot of overhead we use * a calculation to determine the correct bit directly. */ if (key->hw_key_idx < 32) { mask = 1 << key->hw_key_idx; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR2); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00usb_register_write(rt2x00dev, SEC_CSR2, reg); } else { mask = 1 << (key->hw_key_idx - 32); reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR3); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00usb_register_write(rt2x00dev, SEC_CSR3, reg); } return 0; } static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u32 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. 
*/ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, !(filter_flags & FIF_CONTROL)); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); } static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable synchronisation. */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); } if (flags & CONFIG_UPDATE_MAC) { reg = le32_to_cpu(conf->mac[1]); rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); conf->mac[1] = cpu_to_le32(reg); rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, sizeof(conf->mac)); } if (flags & CONFIG_UPDATE_BSSID) { reg = le32_to_cpu(conf->bssid[1]); rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3); conf->bssid[1] = cpu_to_le32(reg); rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR4, conf->bssid, sizeof(conf->bssid)); } } static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32); rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); if (changed & BSS_CHANGED_ERP_PREAMBLE) { reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR4); rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, erp->beacon_int * 16); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR9); rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR8); rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg); } } static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; u8 temp; r3 = rt73usb_bbp_read(rt2x00dev, 3); r4 = rt73usb_bbp_read(rt2x00dev, 4); r77 = rt73usb_bbp_read(rt2x00dev, 77); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); /* * Configure the RX antenna. 
*/ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); temp = !rt2x00_has_cap_frame_type(rt2x00dev) && (rt2x00dev->curr_band != NL80211_BAND_5GHZ); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); break; case ANTENNA_B: default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); break; } rt73usb_bbp_write(rt2x00dev, 77, r77); rt73usb_bbp_write(rt2x00dev, 3, r3); rt73usb_bbp_write(rt2x00dev, 4, r4); } static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; r3 = rt73usb_bbp_read(rt2x00dev, 3); r4 = rt73usb_bbp_read(rt2x00dev, 4); r77 = rt73usb_bbp_read(rt2x00dev, 77); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, !rt2x00_has_cap_frame_type(rt2x00dev)); /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); break; case ANTENNA_A: rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); break; case ANTENNA_B: default: rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); break; } rt73usb_bbp_write(rt2x00dev, 77, r77); rt73usb_bbp_write(rt2x00dev, 3, r3); rt73usb_bbp_write(rt2x00dev, 4, r4); } struct antenna_sel { u8 word; /* * value[0] -> non-LNA * value[1] -> LNA */ u8 value[2]; }; static const struct antenna_sel antenna_sel_a[] = { { 96, { 0x58, 0x78 } }, { 104, { 0x38, 0x48 } }, { 75, { 0xfe, 0x80 } }, { 86, { 0xfe, 0x80 } }, { 88, { 0xfe, 0x80 } }, { 35, { 0x60, 0x60 } }, { 97, { 0x58, 0x58 } }, { 98, { 0x58, 0x58 } }, }; static const struct antenna_sel antenna_sel_bg[] = { { 96, { 0x48, 0x68 } }, { 104, { 0x2c, 0x3c } }, { 75, { 0xfe, 0x80 } }, { 86, { 0xfe, 0x80 } }, { 88, { 0xfe, 0x80 } }, { 35, { 0x50, 0x50 } }, { 97, { 0x48, 0x48 } }, { 98, { 0x48, 0x48 } }, }; static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { const struct antenna_sel *sel; unsigned int lna; unsigned int i; u32 reg; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. 
*/ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { sel = antenna_sel_a; lna = rt2x00_has_cap_external_lna_a(rt2x00dev); } else { sel = antenna_sel_bg; lna = rt2x00_has_cap_external_lna_bg(rt2x00dev); } for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); reg = rt2x00usb_register_read(rt2x00dev, PHY_CSR0); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, (rt2x00dev->curr_band == NL80211_BAND_2GHZ)); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, (rt2x00dev->curr_band == NL80211_BAND_5GHZ)); rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225)) rt73usb_config_antenna_5x(rt2x00dev, ant); else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527)) rt73usb_config_antenna_2x(rt2x00dev, ant); } static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u16 eeprom; short lna_gain = 0; if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) lna_gain += 14; eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG); lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); } else { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A); lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); } rt2x00dev->lna_gain = lna_gain; } static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { u8 r3; u8 r94; u8 smart; rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)); r3 = rt73usb_bbp_read(rt2x00dev, 3); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); rt73usb_bbp_write(rt2x00dev, 3, r3); r94 = 6; if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) r94 += txpower - MAX_TXPOWER; else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) r94 += txpower; rt73usb_bbp_write(rt2x00dev, 94, r94); rt73usb_rf_write(rt2x00dev, 1, rf->rf1); rt73usb_rf_write(rt2x00dev, 2, rf->rf2); rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt73usb_rf_write(rt2x00dev, 4, rf->rf4); rt73usb_rf_write(rt2x00dev, 1, rf->rf1); rt73usb_rf_write(rt2x00dev, 2, rf->rf2); rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); rt73usb_rf_write(rt2x00dev, 4, rf->rf4); rt73usb_rf_write(rt2x00dev, 1, rf->rf1); rt73usb_rf_write(rt2x00dev, 2, rf->rf2); rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt73usb_rf_write(rt2x00dev, 4, rf->rf4); udelay(10); } static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { struct rf_channel rf; rf.rf1 = rt2x00_rf_read(rt2x00dev, 1); rf.rf2 = rt2x00_rf_read(rt2x00dev, 2); rf.rf3 = rt2x00_rf_read(rt2x00dev, 3); rf.rf4 = rt2x00_rf_read(rt2x00dev, 4); rt73usb_config_channel(rt2x00dev, &rf, txpower); } static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR4); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0); rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, libconf->conf->short_frame_max_tx_count); 
rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); } static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? STATE_SLEEP : STATE_AWAKE; u32 reg; if (state == STATE_SLEEP) { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR11); rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, rt2x00dev->beacon_int - 10); rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5); /* We must first disable autowake before it can be enabled */ rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1); rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_SLEEP, REGISTER_TIMEOUT); } else { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR11); rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0); rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_WAKEUP, REGISTER_TIMEOUT); } } static void rt73usb_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { /* Always recalculate LNA gain before changing configuration */ rt73usb_config_lna_gain(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt73usb_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) rt73usb_config_retry_limit(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_PS) rt73usb_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u32 reg; /* * Update FCS error count from register. */ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR0); qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. */ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR1); qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); } static inline void rt73usb_set_vgc(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, u8 vgc_level) { if (qual->vgc_level != vgc_level) { rt73usb_bbp_write(rt2x00dev, 17, vgc_level); qual->vgc_level = vgc_level; qual->vgc_level_reg = vgc_level; } } static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { rt73usb_set_vgc(rt2x00dev, qual, 0x20); } static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, const u32 count) { u8 up_bound; u8 low_bound; /* * Determine r17 bounds. */ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { low_bound = 0x28; up_bound = 0x48; if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { low_bound += 0x10; up_bound += 0x10; } } else { if (qual->rssi > -82) { low_bound = 0x1c; up_bound = 0x40; } else if (qual->rssi > -84) { low_bound = 0x1c; up_bound = 0x20; } else { low_bound = 0x1c; up_bound = 0x1c; } if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { low_bound += 0x14; up_bound += 0x10; } } /* * If we are not associated, we should go straight to the * dynamic CCA tuning. 
*/ if (!rt2x00dev->intf_associated) goto dynamic_cca_tune; /* * Special big-R17 for very short distance */ if (qual->rssi > -35) { rt73usb_set_vgc(rt2x00dev, qual, 0x60); return; } /* * Special big-R17 for short distance */ if (qual->rssi >= -58) { rt73usb_set_vgc(rt2x00dev, qual, up_bound); return; } /* * Special big-R17 for middle-short distance */ if (qual->rssi >= -66) { rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x10); return; } /* * Special mid-R17 for middle distance */ if (qual->rssi >= -74) { rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x08); return; } /* * Special case: Change up_bound based on the rssi. * Lower up_bound when rssi is weaker then -74 dBm. */ up_bound -= 2 * (-74 - qual->rssi); if (low_bound > up_bound) up_bound = low_bound; if (qual->vgc_level > up_bound) { rt73usb_set_vgc(rt2x00dev, qual, up_bound); return; } dynamic_cca_tune: /* * r17 does not yet exceed upper limit, continue and base * the r17 tuning on the false CCA count. */ if ((qual->false_cca > 512) && (qual->vgc_level < up_bound)) rt73usb_set_vgc(rt2x00dev, qual, min_t(u8, qual->vgc_level + 4, up_bound)); else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound)) rt73usb_set_vgc(rt2x00dev, qual, max_t(u8, qual->vgc_level - 4, low_bound)); } /* * Queue handlers. */ static void rt73usb_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); break; default: break; } } static void rt73usb_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); break; default: break; } } /* * Firmware functions */ static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) { return FIRMWARE_RT2571; } static int rt73usb_check_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { u16 fw_crc; u16 crc; /* * Only support 2kb firmware files. */ if (len != 2048) return FW_BAD_LENGTH; /* * The last 2 bytes in the firmware array are the crc checksum itself, * this means that we should never pass those 2 bytes to the crc * algorithm. */ fw_crc = (data[len - 2] << 8 | data[len - 1]); /* * Use the crc itu-t algorithm. */ crc = crc_itu_t(0, data, len - 2); crc = crc_itu_t_byte(crc, 0); crc = crc_itu_t_byte(crc, 0); return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; } static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { unsigned int i; int status; u32 reg; /* * Wait for stable hardware. 
*/ for (i = 0; i < 100; i++) { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR0); if (reg) break; msleep(1); } if (!reg) { rt2x00_err(rt2x00dev, "Unstable hardware\n"); return -EBUSY; } /* * Write firmware to device. */ rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len); /* * Send firmware request to device to load firmware, * we need to specify a long timeout time. */ status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_FIRMWARE, REGISTER_TIMEOUT_FIRMWARE); if (status < 0) { rt2x00_err(rt2x00dev, "Failed to write Firmware to device\n"); return status; } return 0; } /* * Initialization functions. */ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR1, reg); /* * CCK TXD BBP registers */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2, 11); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR2, reg); /* * OFDM TXD BBP registers */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR3); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR3, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR7); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46); rt2x00usb_register_write(rt2x00dev, TXRX_CSR7, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR8); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); rt2x00usb_register_write(rt2x00dev, TXRX_CSR8, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00_set_field32(&reg, 
TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); rt2x00usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR6); rt2x00_set_field32(&reg, MAC_CSR6_MAX_FRAME_UNIT, 0xfff); rt2x00usb_register_write(rt2x00dev, MAC_CSR6, reg); rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; rt2x00usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00); /* * Invalidate all Shared Keys (SEC_CSR0), * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) */ rt2x00usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000); rt2x00usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000); rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); reg = 0x000023b0; if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)) rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1); rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg); rt2x00usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06); rt2x00usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); rt2x00usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR9); rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); /* * Clear all beacons * For the Beacon base registers we only need to clear * the first byte since that byte contains the VALID and OWNER * bits which (when set to 0) will invalidate the entire beacon. */ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0); rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0); rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0); rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0); /* * We must clear the error counters. * These registers are cleared on read, * so we may pass a useless variable to store the value. */ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR0); reg = rt2x00usb_register_read(rt2x00dev, STA_CSR1); reg = rt2x00usb_register_read(rt2x00dev, STA_CSR2); /* * Reset MAC and BBP registers. 
*/ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); return 0; } static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { value = rt73usb_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 reg_id; u8 value; if (unlikely(rt73usb_wait_bbp_ready(rt2x00dev))) return -EACCES; rt73usb_bbp_write(rt2x00dev, 3, 0x80); rt73usb_bbp_write(rt2x00dev, 15, 0x30); rt73usb_bbp_write(rt2x00dev, 21, 0xc8); rt73usb_bbp_write(rt2x00dev, 22, 0x38); rt73usb_bbp_write(rt2x00dev, 23, 0x06); rt73usb_bbp_write(rt2x00dev, 24, 0xfe); rt73usb_bbp_write(rt2x00dev, 25, 0x0a); rt73usb_bbp_write(rt2x00dev, 26, 0x0d); rt73usb_bbp_write(rt2x00dev, 32, 0x0b); rt73usb_bbp_write(rt2x00dev, 34, 0x12); rt73usb_bbp_write(rt2x00dev, 37, 0x07); rt73usb_bbp_write(rt2x00dev, 39, 0xf8); rt73usb_bbp_write(rt2x00dev, 41, 0x60); rt73usb_bbp_write(rt2x00dev, 53, 0x10); rt73usb_bbp_write(rt2x00dev, 54, 0x18); rt73usb_bbp_write(rt2x00dev, 60, 0x10); rt73usb_bbp_write(rt2x00dev, 61, 0x04); rt73usb_bbp_write(rt2x00dev, 62, 0x04); rt73usb_bbp_write(rt2x00dev, 75, 0xfe); rt73usb_bbp_write(rt2x00dev, 86, 0xfe); rt73usb_bbp_write(rt2x00dev, 88, 0xfe); rt73usb_bbp_write(rt2x00dev, 90, 0x0f); rt73usb_bbp_write(rt2x00dev, 99, 0x00); rt73usb_bbp_write(rt2x00dev, 102, 0x16); rt73usb_bbp_write(rt2x00dev, 107, 0x04); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt73usb_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. */ if (unlikely(rt73usb_init_registers(rt2x00dev) || rt73usb_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); /* * Disable synchronisation. */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, 0); rt2x00usb_disable_radio(rt2x00dev); } static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; bool put_to_sleep; put_to_sleep = (state != STATE_AWAKE); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR12); rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. 
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg2 = rt2x00usb_register_read(rt2x00dev, MAC_CSR12); state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); if (state == !put_to_sleep) return 0; rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); msleep(10); } return -EBUSY; } static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt73usb_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt73usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt73usb_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt73usb_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *txd = (__le32 *) entry->skb->data; u32 word; /* * Start writing the descriptor words. */ word = rt2x00_desc_read(txd, 0); rt2x00_set_field32(&word, TXD_W0_BURST, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_VALID, 1); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_BURST2, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); rt2x00_desc_write(txd, 0, word); word = rt2x00_desc_read(txd, 1); rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); rt2x00_desc_write(txd, 1, word); word = rt2x00_desc_read(txd, 2); rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); } word = rt2x00_desc_read(txd, 5); rt2x00_set_field32(&word, TXD_W5_TX_POWER, TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power)); rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 
rt2x00_desc_write(txd, 5, word); /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * TX data initialization */ static void rt73usb_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; unsigned int beacon_base; unsigned int padding_len; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); orig_reg = reg; rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); memset(entry->skb->data, 0, TXD_DESC_SIZE); /* * Write the TX descriptor for the beacon. */ rt73usb_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with descriptor and padding to register. */ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; if (padding_len && skb_pad(entry->skb, padding_len)) { rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n"); /* skb freed by skb_pad() on failure */ entry->skb = NULL; rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg); return; } beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); /* * Enable beaconing again. * * For Wi-Fi faily generated beacons between participating stations. * Set TBTT phase adaptive adjustment step to 8us (default 16us) */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Clean up the beacon skb. */ dev_kfree_skb(entry->skb); entry->skb = NULL; } static void rt73usb_clear_beacon(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; unsigned int beacon_base; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ orig_reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); reg = orig_reg; rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Clear beacon. */ beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00usb_register_write(rt2x00dev, beacon_base, 0); /* * Restore beaconing state. */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg); } static int rt73usb_get_tx_data_len(struct queue_entry *entry) { int length; /* * The length _must_ be a multiple of 4, * but it must _not_ be a multiple of the USB packet size. 
*/ length = roundup(entry->skb->len, 4); length += (4 * !(length % entry->queue->usb_maxpacket)); return length; } /* * RX control handlers */ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) { u8 offset = rt2x00dev->lna_gain; u8 lna; lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); switch (lna) { case 3: offset += 90; break; case 2: offset += 74; break; case 1: offset += 64; break; default: return 0; } if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { if (lna == 3 || lna == 2) offset += 10; } else { if (lna == 3) offset += 6; else if (lna == 2) offset += 8; } } return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; } static void rt73usb_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *rxd = (__le32 *)entry->skb->data; u32 word0; u32 word1; /* * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of * frame data in rt2x00usb. */ memcpy(skbdesc->desc, rxd, skbdesc->desc_len); rxd = (__le32 *)skbdesc->desc; /* * It is now safe to read the descriptor on all architectures. */ word0 = rt2x00_desc_read(rxd, 0); word1 = rt2x00_desc_read(rxd, 1); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG); rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR); if (rxdesc->cipher != CIPHER_NONE) { rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2); rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3); rxdesc->dev_flags |= RXDONE_CRYPTO_IV; rxdesc->icv = _rt2x00_desc_read(rxd, 4); rxdesc->dev_flags |= RXDONE_CRYPTO_ICV; /* * Hardware has stripped IV/EIV data from 802.11 frame during * decryption. It has provided the data separately but rt2x00lib * should decide if it should be reinserted. */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* * The hardware has already checked the Michael Mic and has * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1); rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; /* * Set skb pointers, and update frame information. */ skb_pull(entry->skb, entry->queue->desc_size); skb_trim(entry->skb, rxdesc->size); } /* * Device probe functions. */ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) { u16 word; u8 *mac; s8 value; rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); /* * Start validation of the data that has been read. 
*/ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_B); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_B); rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_ACT, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_0, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_1, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_2, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_3, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_4, 0); rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, LED_MODE_DEFAULT); rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); } else { value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word); } else { value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); } return 0; } static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. 
*/ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR0); rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) { rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n"); return -ENODEV; } if (!rt2x00_rf(rt2x00dev, RF5226) && !rt2x00_rf(rt2x00dev, RF2528) && !rt2x00_rf(rt2x00dev, RF5225) && !rt2x00_rf(rt2x00dev, RF2527)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * Read the Frame type. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) __set_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags); /* * Detect if this device has an hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Read frequency offset. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ); rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); /* * Read external LNA informations. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) { __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags); } /* * Store led settings, for correct led behaviour. */ #ifdef CONFIG_RT2X00_LIB_LEDS eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED); rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); rt73usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); if (value == LED_MODE_SIGNAL_STRENGTH) rt73usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_0)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_1)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_2)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_3)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_4)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_RDY_G)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_RDY_A)); #endif /* CONFIG_RT2X00_LIB_LEDS */ return 0; } /* * RF value list for RF2528 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2528[] = { { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, { 7, 0x00002c0c, 0x00000792, 
0x00068255, 0x000fea0b }, { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, }; /* * RF value list for RF5226 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5226[] = { { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00002c0c, 0x0000099a, 0x00098255, 0x000fea23 }, { 40, 0x00002c0c, 0x000009a2, 0x00098255, 0x000fea03 }, { 44, 0x00002c0c, 0x000009a6, 0x00098255, 0x000fea0b }, { 48, 0x00002c0c, 0x000009aa, 0x00098255, 0x000fea13 }, { 52, 0x00002c0c, 0x000009ae, 0x00098255, 0x000fea1b }, { 56, 0x00002c0c, 0x000009b2, 0x00098255, 0x000fea23 }, { 60, 0x00002c0c, 0x000009ba, 0x00098255, 0x000fea03 }, { 64, 0x00002c0c, 0x000009be, 0x00098255, 0x000fea0b }, /* 802.11 HyperLan 2 */ { 100, 0x00002c0c, 0x00000a2a, 0x000b8255, 0x000fea03 }, { 104, 0x00002c0c, 0x00000a2e, 0x000b8255, 0x000fea0b }, { 108, 0x00002c0c, 0x00000a32, 0x000b8255, 0x000fea13 }, { 112, 0x00002c0c, 0x00000a36, 0x000b8255, 0x000fea1b }, { 116, 0x00002c0c, 0x00000a3a, 0x000b8255, 0x000fea23 }, { 120, 0x00002c0c, 0x00000a82, 0x000b8255, 0x000fea03 }, { 124, 0x00002c0c, 0x00000a86, 0x000b8255, 0x000fea0b }, { 128, 0x00002c0c, 0x00000a8a, 0x000b8255, 0x000fea13 }, { 132, 0x00002c0c, 0x00000a8e, 0x000b8255, 0x000fea1b }, { 136, 0x00002c0c, 0x00000a92, 0x000b8255, 0x000fea23 }, /* 802.11 UNII */ { 140, 0x00002c0c, 0x00000a9a, 0x000b8255, 0x000fea03 }, { 149, 0x00002c0c, 0x00000aa2, 0x000b8255, 0x000fea1f }, { 153, 0x00002c0c, 0x00000aa6, 0x000b8255, 0x000fea27 }, { 157, 0x00002c0c, 0x00000aae, 0x000b8255, 0x000fea07 }, { 161, 0x00002c0c, 0x00000ab2, 0x000b8255, 0x000fea0f }, { 165, 0x00002c0c, 0x00000ab6, 0x000b8255, 0x000fea17 }, /* MMAC(Japan)J52 ch 34,38,42,46 */ { 34, 0x00002c0c, 0x0008099a, 0x000da255, 0x000d3a0b }, { 38, 0x00002c0c, 0x0008099e, 0x000da255, 0x000d3a13 }, { 42, 0x00002c0c, 0x000809a2, 0x000da255, 0x000d3a1b }, { 46, 0x00002c0c, 0x000809a6, 0x000da255, 0x000d3a23 }, }; /* * RF value list for RF5225 & RF2527 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5225_2527[] = { { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, { 8, 0x00002ccc, 0x00004792, 0x00068455, 
0x000ffa1f }, { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, /* 802.11 HyperLan 2 */ { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, /* 802.11 UNII */ { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, /* MMAC(Japan)J52 ch 34,38,42,46 */ { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, }; static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; unsigned int i; /* * Initialize all hw fields. * * Don't set IEEE80211_HOST_BROADCAST_PS_BUFFERING unless we are * capable of sending the buffered frames out after the DTIM * transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * infinitly and thus dropping it after some time. */ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. 
*/ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (rt2x00_rf(rt2x00dev, RF2528)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); spec->channels = rf_vals_bg_2528; } else if (rt2x00_rf(rt2x00dev, RF5226)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5226); spec->channels = rf_vals_5226; } else if (rt2x00_rf(rt2x00dev, RF2527)) { spec->num_channels = 14; spec->channels = rf_vals_5225_2527; } else if (rt2x00_rf(rt2x00dev, RF5225)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); spec->channels = rf_vals_5225_2527; } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i - 14]); } } return 0; } static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u32 reg; /* * Allocate eeprom data. */ retval = rt73usb_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt73usb_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR13); rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); /* * Initialize hw specifications. */ retval = rt73usb_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device has multiple filters for control frames, * but has no a separate filter for PS Poll frames. */ __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags); /* * This device requires firmware. */ __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags); if (!modparam_nohwcrypt) __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags); __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } /* * IEEE80211 stack callback functions. */ static int rt73usb_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue_idx, const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; struct rt2x00_field32 field; int retval; u32 reg; u32 offset; /* * First pass the configuration through rt2x00lib, that will * update the queue settings and validate the input. After that * we are free to update the registers based on the value * in the queue parameter. 
*/ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params); if (retval) return retval; /* * We only need to perform additional register initialization * for WMM queues/ */ if (queue_idx >= 4) return 0; queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); field.bit_offset = (queue_idx & 1) * 16; field.bit_mask = 0xffff << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, field, queue->txop); rt2x00usb_register_write(rt2x00dev, offset, reg); /* Update WMM registers */ field.bit_offset = queue_idx * 4; field.bit_mask = 0xf << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, AIFSN_CSR); rt2x00_set_field32(&reg, field, queue->aifs); rt2x00usb_register_write(rt2x00dev, AIFSN_CSR, reg); reg = rt2x00usb_register_read(rt2x00dev, CWMIN_CSR); rt2x00_set_field32(&reg, field, queue->cw_min); rt2x00usb_register_write(rt2x00dev, CWMIN_CSR, reg); reg = rt2x00usb_register_read(rt2x00dev, CWMAX_CSR); rt2x00_set_field32(&reg, field, queue->cw_max); rt2x00usb_register_write(rt2x00dev, CWMAX_CSR, reg); return 0; } static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR13); tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR12); tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); return tsf; } static const struct ieee80211_ops rt73usb_mac80211_ops = { .add_chanctx = ieee80211_emulate_add_chanctx, .remove_chanctx = ieee80211_emulate_remove_chanctx, .change_chanctx = ieee80211_emulate_change_chanctx, .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt73usb_conf_tx, .get_tsf = rt73usb_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { .probe_hw = rt73usb_probe_hw, .get_firmware_name = rt73usb_get_firmware_name, .check_firmware = rt73usb_check_firmware, .load_firmware = rt73usb_load_firmware, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, .set_device_state = rt73usb_set_device_state, .rfkill_poll = rt73usb_rfkill_poll, .link_stats = rt73usb_link_stats, .reset_tuner = rt73usb_reset_tuner, .link_tuner = rt73usb_link_tuner, .watchdog = rt2x00usb_watchdog, .start_queue = rt73usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt73usb_stop_queue, .flush_queue = rt2x00usb_flush_queue, .write_tx_desc = rt73usb_write_tx_desc, .write_beacon = rt73usb_write_beacon, .clear_beacon = rt73usb_clear_beacon, .get_tx_data_len = rt73usb_get_tx_data_len, .fill_rxdone = rt73usb_fill_rxdone, .config_shared_key = 
rt73usb_config_shared_key, .config_pairwise_key = rt73usb_config_pairwise_key, .config_filter = rt73usb_config_filter, .config_intf = rt73usb_config_intf, .config_erp = rt73usb_config_erp, .config_ant = rt73usb_config_ant, .config = rt73usb_config, }; static void rt73usb_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_BEACON: queue->limit = 4; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXINFO_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_ATIM: default: BUG(); break; } } static const struct rt2x00_ops rt73usb_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 4, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt73usb_queue_init, .lib = &rt73usb_rt2x00_ops, .hw = &rt73usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt73usb_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * rt73usb module information. */ static const struct usb_device_id rt73usb_device_table[] = { /* AboCom */ { USB_DEVICE(0x07b8, 0xb21b) }, { USB_DEVICE(0x07b8, 0xb21c) }, { USB_DEVICE(0x07b8, 0xb21d) }, { USB_DEVICE(0x07b8, 0xb21e) }, { USB_DEVICE(0x07b8, 0xb21f) }, /* AL */ { USB_DEVICE(0x14b2, 0x3c10) }, /* Amigo */ { USB_DEVICE(0x148f, 0x9021) }, { USB_DEVICE(0x0eb0, 0x9021) }, /* AMIT */ { USB_DEVICE(0x18c5, 0x0002) }, /* Askey */ { USB_DEVICE(0x1690, 0x0722) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x1723) }, { USB_DEVICE(0x0b05, 0x1724) }, /* Belkin */ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 
3.x */ { USB_DEVICE(0x050d, 0x705a) }, { USB_DEVICE(0x050d, 0x905b) }, { USB_DEVICE(0x050d, 0x905c) }, /* Billionton */ { USB_DEVICE(0x1631, 0xc019) }, { USB_DEVICE(0x08dd, 0x0120) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00d8) }, { USB_DEVICE(0x0411, 0x00d9) }, { USB_DEVICE(0x0411, 0x00e6) }, { USB_DEVICE(0x0411, 0x00f4) }, { USB_DEVICE(0x0411, 0x0116) }, { USB_DEVICE(0x0411, 0x0119) }, { USB_DEVICE(0x0411, 0x0137) }, /* CEIVA */ { USB_DEVICE(0x178d, 0x02be) }, /* CNet */ { USB_DEVICE(0x1371, 0x9022) }, { USB_DEVICE(0x1371, 0x9032) }, /* Conceptronic */ { USB_DEVICE(0x14b2, 0x3c22) }, /* Corega */ { USB_DEVICE(0x07aa, 0x002e) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c03) }, { USB_DEVICE(0x07d1, 0x3c04) }, { USB_DEVICE(0x07d1, 0x3c06) }, { USB_DEVICE(0x07d1, 0x3c07) }, /* Edimax */ { USB_DEVICE(0x7392, 0x7318) }, { USB_DEVICE(0x7392, 0x7618) }, /* EnGenius */ { USB_DEVICE(0x1740, 0x3701) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0004) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x8008) }, { USB_DEVICE(0x1044, 0x800a) }, /* Huawei-3Com */ { USB_DEVICE(0x1472, 0x0009) }, /* Hercules */ { USB_DEVICE(0x06f8, 0xe002) }, { USB_DEVICE(0x06f8, 0xe010) }, { USB_DEVICE(0x06f8, 0xe020) }, /* Linksys */ { USB_DEVICE(0x13b1, 0x0020) }, { USB_DEVICE(0x13b1, 0x0023) }, { USB_DEVICE(0x13b1, 0x0028) }, /* MSI */ { USB_DEVICE(0x0db0, 0x4600) }, { USB_DEVICE(0x0db0, 0x6877) }, { USB_DEVICE(0x0db0, 0x6874) }, { USB_DEVICE(0x0db0, 0xa861) }, { USB_DEVICE(0x0db0, 0xa874) }, /* Ovislink */ { USB_DEVICE(0x1b75, 0x7318) }, /* Ralink */ { USB_DEVICE(0x04bb, 0x093d) }, { USB_DEVICE(0x148f, 0x2573) }, { USB_DEVICE(0x148f, 0x2671) }, { USB_DEVICE(0x0812, 0x3101) }, /* Qcom */ { USB_DEVICE(0x18e8, 0x6196) }, { USB_DEVICE(0x18e8, 0x6229) }, { USB_DEVICE(0x18e8, 0x6238) }, /* Samsung */ { USB_DEVICE(0x04e8, 0x4471) }, /* Senao */ { USB_DEVICE(0x1740, 0x7100) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0024) }, { USB_DEVICE(0x0df6, 0x0027) }, { USB_DEVICE(0x0df6, 0x002f) }, { USB_DEVICE(0x0df6, 0x90ac) }, { USB_DEVICE(0x0df6, 0x9712) }, /* Surecom */ { USB_DEVICE(0x0769, 0x31f3) }, /* Tilgin */ { USB_DEVICE(0x6933, 0x5001) }, /* Philips */ { USB_DEVICE(0x0471, 0x200a) }, /* Planex */ { USB_DEVICE(0x2019, 0xab01) }, { USB_DEVICE(0x2019, 0xab50) }, /* WideTell */ { USB_DEVICE(0x7167, 0x3840) }, /* Zcom */ { USB_DEVICE(0x0cde, 0x001c) }, /* ZyXEL */ { USB_DEVICE(0x0586, 0x3415) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); MODULE_DEVICE_TABLE(usb, rt73usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2571); MODULE_LICENSE("GPL"); static int rt73usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { return rt2x00usb_probe(usb_intf, &rt73usb_ops); } static struct usb_driver rt73usb_driver = { .name = KBUILD_MODNAME, .id_table = rt73usb_device_table, .probe = rt73usb_probe, .disconnect = rt2x00usb_disconnect, .suspend = rt2x00usb_suspend, .resume = rt2x00usb_resume, .reset_resume = rt2x00usb_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rt73usb_driver);
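/*
 * Hedged illustration (not part of the driver): the rt73usb_conf_tx() path
 * above packs per-queue WMM parameters into shared 32-bit registers by
 * computing a bit field from the queue index - a 4-bit AIFS/CW field per
 * queue and a 16-bit TXOP field with two queues per register. The
 * stand-alone user-space sketch below mirrors that packing with local
 * stand-ins; the register images, set_field32() helper and sample values
 * are simplifications for illustration, not the real rt2x00 API.
 */
#include <stdint.h>
#include <stdio.h>

struct field32 { uint32_t bit_offset, bit_mask; };

/* read-modify-write of one field inside a 32-bit register image */
static void set_field32(uint32_t *reg, struct field32 f, uint32_t val)
{
	*reg &= ~f.bit_mask;
	*reg |= (val << f.bit_offset) & f.bit_mask;
}

int main(void)
{
	uint32_t aifsn_csr = 0, txop_csr[2] = { 0, 0 };
	unsigned int queue_idx;

	for (queue_idx = 0; queue_idx < 4; queue_idx++) {
		/* 4-bit AIFS field per queue, as in the driver */
		struct field32 aifs = {
			.bit_offset = queue_idx * 4,
			.bit_mask = 0xfu << (queue_idx * 4),
		};
		/* 16-bit TXOP field, two queues per register */
		struct field32 txop = {
			.bit_offset = (queue_idx & 1) * 16,
			.bit_mask = 0xffffu << ((queue_idx & 1) * 16),
		};

		set_field32(&aifsn_csr, aifs, 2 + queue_idx);	/* sample AIFS */
		set_field32(&txop_csr[!!(queue_idx & 2)], txop, 0x30 * queue_idx);
	}

	printf("AIFSN_CSR = 0x%08x\n", aifsn_csr);
	printf("AC_TXOP_CSR0 = 0x%08x, AC_TXOP_CSR1 = 0x%08x\n",
	       txop_csr[0], txop_csr[1]);
	return 0;
}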
// SPDX-License-Identifier: GPL-2.0-or-later /* * Support for the sensor part which is integrated (I think) into the * st6422 stv06xx alike bridge, as it's integrated there are no i2c writes * but instead direct bridge writes. * * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com> * * Strongly based on qc-usb-messenger, which is: * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland * Copyright (c) 2002, 2003 Tuukka Toivonen */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "stv06xx_st6422.h" static struct v4l2_pix_format st6422_mode[] = { /* Note we actually get 124 lines of data, of which we skip the first 4 as they are garbage */ { 162, 120, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = 162 * 120, .bytesperline = 162, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, /* Note we actually get 248 lines of data, of which we skip the first 4 as they are garbage, and we tell the app it only gets the first 240 of the 244 lines it actually gets, so that it ignores the last 4.
*/ { 324, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = 324 * 244, .bytesperline = 324, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, }; /* V4L2 controls supported by the driver */ static int setbrightness(struct sd *sd, s32 val); static int setcontrast(struct sd *sd, s32 val); static int setgain(struct sd *sd, u8 gain); static int setexposure(struct sd *sd, s16 expo); static int st6422_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; int err = -EINVAL; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: err = setbrightness(sd, ctrl->val); break; case V4L2_CID_CONTRAST: err = setcontrast(sd, ctrl->val); break; case V4L2_CID_GAIN: err = setgain(sd, ctrl->val); break; case V4L2_CID_EXPOSURE: err = setexposure(sd, ctrl->val); break; } /* commit settings */ if (err >= 0) err = stv06xx_write_bridge(sd, 0x143f, 0x01); sd->gspca_dev.usb_err = err; return err; } static const struct v4l2_ctrl_ops st6422_ctrl_ops = { .s_ctrl = st6422_s_ctrl, }; static int st6422_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 31, 1, 3); v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops, V4L2_CID_CONTRAST, 0, 15, 1, 11); v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops, V4L2_CID_EXPOSURE, 0, 1023, 1, 256); v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, 64); return hdl->error; } static int st6422_probe(struct sd *sd) { if (sd->bridge != BRIDGE_ST6422) return -ENODEV; pr_info("st6422 sensor detected\n"); sd->gspca_dev.cam.cam_mode = st6422_mode; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode); return 0; } static int st6422_init(struct sd *sd) { int err = 0, i; static const u16 st6422_bridge_init[][2] = { { STV_ISO_ENABLE, 0x00 }, /* disable capture */ { 0x1436, 0x00 }, { 0x1432, 0x03 }, /* 0x00-0x1F brightness */ { 0x143a, 0xf9 }, /* 0x00-0x0F contrast */ { 0x0509, 0x38 }, /* R */ { 0x050a, 0x38 }, /* G */ { 0x050b, 0x38 }, /* B */ { 0x050c, 0x2a }, { 0x050d, 0x01 }, { 0x1431, 0x00 }, /* 0x00-0x07 ??? */ { 0x1433, 0x34 }, /* 160x120, 0x00-0x01 night filter */ { 0x1438, 0x18 }, /* 640x480 */ /* 18 bayes */ /* 10 compressed? */ { 0x1439, 0x00 }, /* anti-noise? 0xa2 gives a perfect image */ { 0x143b, 0x05 }, { 0x143c, 0x00 }, /* 0x00-0x01 - ??? */ /* shutter time 0x0000-0x03FF */ /* low value give good picures on moving objects (but requires much light) */ /* high value gives good picures in darkness (but tends to be overexposed) */ { 0x143e, 0x01 }, { 0x143d, 0x00 }, { 0x1442, 0xe2 }, /* write: 1x1x xxxx */ /* read: 1x1x xxxx */ /* bit 5 == button pressed and hold if 0 */ /* write 0xe2,0xea */ /* 0x144a */ /* 0x00 init */ /* bit 7 == button has been pressed, but not handled */ /* interrupt */ /* if(urb->iso_frame_desc[i].status == 0x80) { */ /* if(urb->iso_frame_desc[i].status == 0x88) { */ { 0x1500, 0xd0 }, { 0x1500, 0xd0 }, { 0x1500, 0x50 }, /* 0x00 - 0xFF 0x80 == compr ? 
*/ { 0x1501, 0xaf }, /* high val-> light area gets darker */ /* low val -> light area gets lighter */ { 0x1502, 0xc2 }, /* high val-> light area gets darker */ /* low val -> light area gets lighter */ { 0x1503, 0x45 }, /* high val-> light area gets darker */ /* low val -> light area gets lighter */ { 0x1505, 0x02 }, /* 2 : 324x248 80352 bytes */ /* 7 : 248x162 40176 bytes */ /* c+f: 162*124 20088 bytes */ { 0x150e, 0x8e }, { 0x150f, 0x37 }, { 0x15c0, 0x00 }, { 0x15c3, 0x08 }, /* 0x04/0x14 ... test pictures ??? */ { 0x143f, 0x01 }, /* commit settings */ }; for (i = 0; i < ARRAY_SIZE(st6422_bridge_init) && !err; i++) { err = stv06xx_write_bridge(sd, st6422_bridge_init[i][0], st6422_bridge_init[i][1]); } return err; } static int setbrightness(struct sd *sd, s32 val) { /* val goes from 0 -> 31 */ return stv06xx_write_bridge(sd, 0x1432, val); } static int setcontrast(struct sd *sd, s32 val) { /* Val goes from 0 -> 15 */ return stv06xx_write_bridge(sd, 0x143a, val | 0xf0); } static int setgain(struct sd *sd, u8 gain) { int err; /* Set red, green, blue, gain */ err = stv06xx_write_bridge(sd, 0x0509, gain); if (err < 0) return err; err = stv06xx_write_bridge(sd, 0x050a, gain); if (err < 0) return err; err = stv06xx_write_bridge(sd, 0x050b, gain); if (err < 0) return err; /* 2 mystery writes */ err = stv06xx_write_bridge(sd, 0x050c, 0x2a); if (err < 0) return err; return stv06xx_write_bridge(sd, 0x050d, 0x01); } static int setexposure(struct sd *sd, s16 expo) { int err; err = stv06xx_write_bridge(sd, 0x143d, expo & 0xff); if (err < 0) return err; return stv06xx_write_bridge(sd, 0x143e, expo >> 8); } static int st6422_start(struct sd *sd) { int err; struct cam *cam = &sd->gspca_dev.cam; if (cam->cam_mode[sd->gspca_dev.curr_mode].priv) err = stv06xx_write_bridge(sd, 0x1505, 0x0f); else err = stv06xx_write_bridge(sd, 0x1505, 0x02); if (err < 0) return err; /* commit settings */ err = stv06xx_write_bridge(sd, 0x143f, 0x01); return (err < 0) ? err : 0; } static int st6422_stop(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; gspca_dbg(gspca_dev, D_STREAM, "Halting stream\n"); return 0; }
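/*
 * Hedged sketch (not part of the driver): setexposure() above splits the
 * 0-1023 exposure control across two 8-bit bridge registers (0x143d low
 * byte, 0x143e high bits) and a write to 0x143f commits the settings. The
 * user-space fragment below only demonstrates that byte split; the
 * write_bridge() stub is an illustrative stand-in, not the stv06xx API.
 */
#include <stdint.h>
#include <stdio.h>

/* stand-in for stv06xx_write_bridge(): just log the register access */
static int write_bridge(uint16_t address, uint8_t value)
{
	printf("bridge[0x%04x] = 0x%02x\n", address, value);
	return 0;
}

int main(void)
{
	int16_t expo = 256;			/* default from st6422_init_controls() */

	write_bridge(0x143d, expo & 0xff);	/* low 8 bits of shutter time */
	write_bridge(0x143e, expo >> 8);	/* high bits */
	write_bridge(0x143f, 0x01);		/* commit settings */
	return 0;
}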
// SPDX-License-Identifier: GPL-2.0-or-later /* * Jeilinj subdriver * * Supports some Jeilin dual-mode cameras which use bulk transport and * download raw JPEG data. * * Copyright (C) 2009 Theodore Kilgore * * Sportscam DV15 support and control settings are * Copyright (C) 2011 Patrice Chotard */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "jeilinj" #include <linux/slab.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/JEILINJ USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define JEILINJ_CMD_TIMEOUT 500 #define JEILINJ_CMD_DELAY 160 #define JEILINJ_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define JEILINJ_MAX_TRANSFER 0x200 #define FRAME_HEADER_LEN 0x10 #define FRAME_START 0xFFFFFFFF enum { SAKAR_57379, SPORTSCAM_DV15, }; #define CAMQUALITY_MIN 0 /* highest cam quality */ #define CAMQUALITY_MAX 97 /* lowest cam quality */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ int blocks_left; const struct v4l2_pix_format *cap_mode; struct v4l2_ctrl *freq; struct v4l2_ctrl *jpegqual; /* Driver stuff */ u8 type; u8 quality; /* image quality */ #define QUALITY_MIN 35 #define QUALITY_MAX 85 #define QUALITY_DEF 85 u8 jpeg_hdr[JPEG_HDR_SZ]; }; struct jlj_command { unsigned char instruction[2]; unsigned char ack_wanted; unsigned char delay; }; /* AFAICT these cameras will only do 320x240. */ static struct v4l2_pix_format jlj_mode[] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, { 640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0} }; /* * cam uses endpoint 0x03 to send commands, 0x84 for read commands, * and 0x82 for bulk transfer. */ /* All commands are two bytes only */ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command) { int retval; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, command, 2); retval = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, 2, NULL, 500); if (retval < 0) { pr_err("command write [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } /* Responses are one byte only */ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char *response) { int retval; if (gspca_dev->usb_err < 0) return; retval = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x84), gspca_dev->usb_buf, 1, NULL, 500); *response = gspca_dev->usb_buf[0]; if (retval < 0) { pr_err("read command [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } static void setfreq(struct gspca_dev *gspca_dev, s32 val) { u8 freq_commands[][2] = { {0x71, 0x80}, {0x70, 0x07} }; freq_commands[0][1] |= val >> 1; jlj_write2(gspca_dev, freq_commands[0]); jlj_write2(gspca_dev, freq_commands[1]); } static void setcamquality(struct gspca_dev *gspca_dev, s32 val) { u8 quality_commands[][2] = { {0x71, 0x1E}, {0x70, 0x06} }; u8 camquality; /* adapt camera quality from jpeg quality */ camquality = ((QUALITY_MAX - val) * CAMQUALITY_MAX) / (QUALITY_MAX - QUALITY_MIN); quality_commands[0][1] += camquality; jlj_write2(gspca_dev, quality_commands[0]); jlj_write2(gspca_dev, quality_commands[1]); } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { u8 autogain_commands[][2] = { {0x94, 0x02}, {0xcf, 0x00} }; autogain_commands[1][1] = val << 4; jlj_write2(gspca_dev, autogain_commands[0]); jlj_write2(gspca_dev, autogain_commands[1]); } static void setred(struct gspca_dev *gspca_dev, s32 val) { u8 setred_commands[][2] = { {0x94, 0x02}, {0xe6, 0x00} }; setred_commands[1][1] = val; jlj_write2(gspca_dev, setred_commands[0]); jlj_write2(gspca_dev, setred_commands[1]); } static void setgreen(struct gspca_dev *gspca_dev, s32 val) { u8 setgreen_commands[][2] = { {0x94, 0x02}, {0xe7, 0x00} }; setgreen_commands[1][1] = val; jlj_write2(gspca_dev, setgreen_commands[0]); jlj_write2(gspca_dev, setgreen_commands[1]); } static void setblue(struct gspca_dev *gspca_dev, s32 val) { u8 setblue_commands[][2] = { {0x94, 0x02}, {0xe9, 0x00} }; setblue_commands[1][1] = val; jlj_write2(gspca_dev, setblue_commands[0]); jlj_write2(gspca_dev, setblue_commands[1]); } static int jlj_start(struct gspca_dev *gspca_dev) { int i; int start_commands_size; u8 response = 0xff; struct sd *sd = (struct sd *) gspca_dev; struct jlj_command start_commands[] = { {{0x71, 0x81}, 0, 0}, 
{{0x70, 0x05}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x81 - gspca_dev->curr_mode}, 0, 0}, {{0x70, 0x04}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x00}, 0, 0}, /* start streaming ??*/ {{0x70, 0x08}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, #define SPORTSCAM_DV15_CMD_SIZE 9 {{0x94, 0x02}, 0, 0}, {{0xde, 0x24}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xdd, 0xf0}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe3, 0x2c}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe4, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe5, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe6, 0x2c}, 0, 0}, {{0x94, 0x03}, 0, 0}, {{0xaa, 0x00}, 0, 0} }; sd->blocks_left = 0; /* Under Windows, USB spy shows that only the 9 first start * commands are used for SPORTSCAM_DV15 webcam */ if (sd->type == SPORTSCAM_DV15) start_commands_size = SPORTSCAM_DV15_CMD_SIZE; else start_commands_size = ARRAY_SIZE(start_commands); for (i = 0; i < start_commands_size; i++) { jlj_write2(gspca_dev, start_commands[i].instruction); if (start_commands[i].delay) msleep(start_commands[i].delay); if (start_commands[i].ack_wanted) jlj_read1(gspca_dev, &response); } setcamquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); msleep(2); setfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->freq)); if (gspca_dev->usb_err < 0) gspca_err(gspca_dev, "Start streaming command failed\n"); return gspca_dev->usb_err; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; int packet_type; u32 header_marker; gspca_dbg(gspca_dev, D_STREAM, "Got %d bytes out of %d for Block 0\n", len, JEILINJ_MAX_TRANSFER); if (len != JEILINJ_MAX_TRANSFER) { gspca_dbg(gspca_dev, D_PACK, "bad length\n"); goto discard; } /* check if it's start of frame */ header_marker = ((u32 *)data)[0]; if (header_marker == FRAME_START) { sd->blocks_left = data[0x0a] - 1; gspca_dbg(gspca_dev, D_STREAM, "blocks_left = 0x%x\n", sd->blocks_left); /* Start a new frame, and add the JPEG header, first thing */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* Toss line 0 of data block 0, keep the rest. */ gspca_frame_add(gspca_dev, INTER_PACKET, data + FRAME_HEADER_LEN, JEILINJ_MAX_TRANSFER - FRAME_HEADER_LEN); } else if (sd->blocks_left > 0) { gspca_dbg(gspca_dev, D_STREAM, "%d blocks remaining for frame\n", sd->blocks_left); sd->blocks_left -= 1; if (sd->blocks_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, data, JEILINJ_MAX_TRANSFER); } else goto discard; return; discard: /* Discard data until a new frame starts. 
*/ gspca_dev->last_packet_type = DISCARD_PACKET; } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; dev->type = id->driver_info; dev->quality = QUALITY_DEF; cam->cam_mode = jlj_mode; cam->nmodes = ARRAY_SIZE(jlj_mode); cam->bulk = 1; cam->bulk_nurbs = 1; cam->bulk_size = JEILINJ_MAX_TRANSFER; return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { int i; u8 *buf; static u8 stop_commands[][2] = { {0x71, 0x00}, {0x70, 0x09}, {0x71, 0x80}, {0x70, 0x05} }; for (;;) { /* get the image remaining blocks */ usb_bulk_msg(gspca_dev->dev, gspca_dev->urb[0]->pipe, gspca_dev->urb[0]->transfer_buffer, JEILINJ_MAX_TRANSFER, NULL, JEILINJ_DATA_TIMEOUT); /* search for 0xff 0xd9 (EOF for JPEG) */ i = 0; buf = gspca_dev->urb[0]->transfer_buffer; while ((i < (JEILINJ_MAX_TRANSFER - 1)) && ((buf[i] != 0xff) || (buf[i+1] != 0xd9))) i++; if (i != (JEILINJ_MAX_TRANSFER - 1)) /* last remaining block found */ break; } for (i = 0; i < ARRAY_SIZE(stop_commands); i++) jlj_write2(gspca_dev, stop_commands[i]); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return gspca_dev->usb_err; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* create the JPEG header */ jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); gspca_dbg(gspca_dev, D_STREAM, "Start streaming at %dx%d\n", gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); jlj_start(gspca_dev); return gspca_dev->usb_err; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0280), .driver_info = SAKAR_57379}, {USB_DEVICE(0x0979, 0x0270), .driver_info = SPORTSCAM_DV15}, {} }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_POWER_LINE_FREQUENCY: setfreq(gspca_dev, ctrl->val); break; case V4L2_CID_RED_BALANCE: setred(gspca_dev, ctrl->val); break; case V4L2_CID_GAIN: setgreen(gspca_dev, ctrl->val); break; case V4L2_CID_BLUE_BALANCE: setblue(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: setautogain(gspca_dev, ctrl->val); break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: jpeg_set_qual(sd->jpeg_hdr, ctrl->val); setcamquality(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; static const struct v4l2_ctrl_config custom_autogain = { .ops = &sd_ctrl_ops, .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Automatic Gain (and Exposure)", .max = 3, .step = 1, .def = 0, }; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 6); sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1, V4L2_CID_POWER_LINE_FREQUENCY_60HZ); v4l2_ctrl_new_custom(hdl, &custom_autogain, NULL); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 
0, 3, 1, 2); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 3, 1, 2); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 3, 1, 2); sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, const struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality); return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual); jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc_sakar_57379 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* sub-driver description */ static const struct sd_desc sd_desc_sportscam_dv15 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; static const struct sd_desc *sd_desc[2] = { &sd_desc_sakar_57379, &sd_desc_sportscam_dv15 }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, sd_desc[id->driver_info], sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
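/*
 * Hedged sketch (not the driver): sd_pkt_scan() above reassembles a JPEG
 * frame from fixed 0x200-byte bulk blocks. Block 0 starts with the
 * 0xFFFFFFFF marker and carries the block count at offset 0x0a; its
 * 16-byte header is dropped and later blocks are appended until the count
 * is exhausted. The user-space simulation below mirrors only that
 * bookkeeping; frame_add() is an illustrative stand-in for
 * gspca_frame_add().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_TRANSFER	0x200
#define HEADER_LEN	0x10

static int blocks_left;

static void frame_add(const char *what, int len)
{
	printf("%-5s packet, %d payload bytes, %d blocks left\n",
	       what, len, blocks_left);
}

static void pkt_scan(const uint8_t *data, int len)
{
	uint32_t marker;

	if (len != MAX_TRANSFER)
		return;				/* discard short blocks */
	memcpy(&marker, data, sizeof(marker));
	if (marker == 0xFFFFFFFFu) {		/* start of a new frame */
		blocks_left = data[0x0a] - 1;
		frame_add("FIRST", MAX_TRANSFER - HEADER_LEN);
	} else if (blocks_left > 0) {
		blocks_left--;
		frame_add(blocks_left ? "INTER" : "LAST", MAX_TRANSFER);
	}
}

int main(void)
{
	uint8_t block[MAX_TRANSFER] = { 0 };
	int i;

	memset(block, 0xff, 4);			/* frame-start marker */
	block[0x0a] = 3;			/* 3 blocks in this frame */
	pkt_scan(block, MAX_TRANSFER);
	memset(block, 0, 4);			/* continuation blocks */
	for (i = 0; i < 2; i++)
		pkt_scan(block, MAX_TRANSFER);
	return 0;
}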
// SPDX-License-Identifier: GPL-2.0-or-later /* * ALSA sequencer Ports * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl> * Jaroslav Kysela <perex@perex.cz> */ #include <sound/core.h> #include <linux/slab.h> #include <linux/module.h> #include "seq_system.h" #include "seq_ports.h" #include "seq_clientmgr.h" /* registration of client ports */ /* NOTE: the current implementation of the port structure as a linked list is not optimal for clients that have many ports.
For sending messages to all subscribers of a port we first need to find the address of the port structure, which means we have to traverse the list. A direct access table (array) would be better, but big preallocated arrays waste memory. Possible actions: 1) leave it this way, a client does normaly does not have more than a few ports 2) replace the linked list of ports by a array of pointers which is dynamicly kmalloced. When a port is added or deleted we can simply allocate a new array, copy the corresponding pointers, and delete the old one. We then only need a pointer to this array, and an integer that tells us how much elements are in array. */ /* return pointer to port structure - port is locked if found */ struct snd_seq_client_port *snd_seq_port_use_ptr(struct snd_seq_client *client, int num) { struct snd_seq_client_port *port; if (client == NULL) return NULL; guard(read_lock)(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if (port->addr.port == num) { if (port->closing) break; /* deleting now */ snd_use_lock_use(&port->use_lock); return port; } } return NULL; /* not found */ } /* search for the next port - port is locked if found */ struct snd_seq_client_port *snd_seq_port_query_nearest(struct snd_seq_client *client, struct snd_seq_port_info *pinfo) { int num; struct snd_seq_client_port *port, *found; bool check_inactive = (pinfo->capability & SNDRV_SEQ_PORT_CAP_INACTIVE); num = pinfo->addr.port; found = NULL; guard(read_lock)(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if ((port->capability & SNDRV_SEQ_PORT_CAP_INACTIVE) && !check_inactive) continue; /* skip inactive ports */ if (port->addr.port < num) continue; if (port->addr.port == num) { found = port; break; } if (found == NULL || port->addr.port < found->addr.port) found = port; } if (found) { if (found->closing) found = NULL; else snd_use_lock_use(&found->use_lock); } return found; } /* initialize snd_seq_port_subs_info */ static void port_subs_info_init(struct snd_seq_port_subs_info *grp) { INIT_LIST_HEAD(&grp->list_head); grp->count = 0; grp->exclusive = 0; rwlock_init(&grp->list_lock); init_rwsem(&grp->list_mutex); grp->open = NULL; grp->close = NULL; } /* create a port, port number or a negative error code is returned * the caller needs to unref the port via snd_seq_port_unlock() appropriately */ int snd_seq_create_port(struct snd_seq_client *client, int port, struct snd_seq_client_port **port_ret) { struct snd_seq_client_port *new_port, *p; int num; *port_ret = NULL; /* sanity check */ if (snd_BUG_ON(!client)) return -EINVAL; if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) { pr_warn("ALSA: seq: too many ports for client %d\n", client->number); return -EINVAL; } /* create a new port */ new_port = kzalloc(sizeof(*new_port), GFP_KERNEL); if (!new_port) return -ENOMEM; /* failure, out of memory */ /* init port data */ new_port->addr.client = client->number; new_port->addr.port = -1; new_port->owner = THIS_MODULE; snd_use_lock_init(&new_port->use_lock); port_subs_info_init(&new_port->c_src); port_subs_info_init(&new_port->c_dest); snd_use_lock_use(&new_port->use_lock); num = max(port, 0); guard(mutex)(&client->ports_mutex); guard(write_lock_irq)(&client->ports_lock); list_for_each_entry(p, &client->ports_list_head, list) { if (p->addr.port == port) { kfree(new_port); return -EBUSY; } if (p->addr.port > num) break; if (port < 0) /* auto-probe mode */ num = p->addr.port + 1; } /* insert the new port */ list_add_tail(&new_port->list, &p->list); 
client->num_ports++; new_port->addr.port = num; /* store the port number in the port */ sprintf(new_port->name, "port-%d", num); *port_ret = new_port; return num; } /* */ static int subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack); static int unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack); static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, struct snd_seq_client **cp) { struct snd_seq_client_port *p; *cp = snd_seq_client_use_ptr(addr->client); if (*cp) { p = snd_seq_port_use_ptr(*cp, addr->port); if (! p) { snd_seq_client_unlock(*cp); *cp = NULL; } return p; } return NULL; } static void delete_and_unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool ack); static inline struct snd_seq_subscribers * get_subscriber(struct list_head *p, bool is_src) { if (is_src) return list_entry(p, struct snd_seq_subscribers, src_list); else return list_entry(p, struct snd_seq_subscribers, dest_list); } /* * remove all subscribers on the list * this is called from port_delete, for each src and dest list. */ static void clear_subscriber_list(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, int is_src) { struct list_head *p, *n; list_for_each_safe(p, n, &grp->list_head) { struct snd_seq_subscribers *subs; struct snd_seq_client *c; struct snd_seq_client_port *aport; subs = get_subscriber(p, is_src); if (is_src) aport = get_client_port(&subs->info.dest, &c); else aport = get_client_port(&subs->info.sender, &c); delete_and_unsubscribe_port(client, port, subs, is_src, false); if (!aport) { /* looks like the connected port is being deleted. * we decrease the counter, and when both ports are deleted * remove the subscriber info */ if (atomic_dec_and_test(&subs->ref_count)) kfree(subs); continue; } /* ok we got the connected port */ delete_and_unsubscribe_port(c, aport, subs, !is_src, true); kfree(subs); snd_seq_port_unlock(aport); snd_seq_client_unlock(c); } } /* delete port data */ static int port_delete(struct snd_seq_client *client, struct snd_seq_client_port *port) { /* set closing flag and wait for all port access are gone */ port->closing = 1; snd_use_lock_sync(&port->use_lock); /* clear subscribers info */ clear_subscriber_list(client, port, &port->c_src, true); clear_subscriber_list(client, port, &port->c_dest, false); if (port->private_free) port->private_free(port->private_data); snd_BUG_ON(port->c_src.count != 0); snd_BUG_ON(port->c_dest.count != 0); kfree(port); return 0; } /* delete a port with the given port id */ int snd_seq_delete_port(struct snd_seq_client *client, int port) { struct snd_seq_client_port *found = NULL, *p; scoped_guard(mutex, &client->ports_mutex) { guard(write_lock_irq)(&client->ports_lock); list_for_each_entry(p, &client->ports_list_head, list) { if (p->addr.port == port) { /* ok found. 
delete from the list at first */ list_del(&p->list); client->num_ports--; found = p; break; } } } if (found) return port_delete(client, found); else return -ENOENT; } /* delete the all ports belonging to the given client */ int snd_seq_delete_all_ports(struct snd_seq_client *client) { struct list_head deleted_list; struct snd_seq_client_port *port, *tmp; /* move the port list to deleted_list, and * clear the port list in the client data. */ guard(mutex)(&client->ports_mutex); scoped_guard(write_lock_irq, &client->ports_lock) { if (!list_empty(&client->ports_list_head)) { list_add(&deleted_list, &client->ports_list_head); list_del_init(&client->ports_list_head); } else { INIT_LIST_HEAD(&deleted_list); } client->num_ports = 0; } /* remove each port in deleted_list */ list_for_each_entry_safe(port, tmp, &deleted_list, list) { list_del(&port->list); snd_seq_system_client_ev_port_exit(port->addr.client, port->addr.port); port_delete(client, port); } return 0; } /* set port info fields */ int snd_seq_set_port_info(struct snd_seq_client_port * port, struct snd_seq_port_info * info) { if (snd_BUG_ON(!port || !info)) return -EINVAL; /* set port name */ if (info->name[0]) strscpy(port->name, info->name, sizeof(port->name)); /* set capabilities */ port->capability = info->capability; /* get port type */ port->type = info->type; /* information about supported channels/voices */ port->midi_channels = info->midi_channels; port->midi_voices = info->midi_voices; port->synth_voices = info->synth_voices; /* timestamping */ port->timestamping = (info->flags & SNDRV_SEQ_PORT_FLG_TIMESTAMP) ? 1 : 0; port->time_real = (info->flags & SNDRV_SEQ_PORT_FLG_TIME_REAL) ? 1 : 0; port->time_queue = info->time_queue; /* UMP direction and group */ port->direction = info->direction; port->ump_group = info->ump_group; if (port->ump_group > SNDRV_UMP_MAX_GROUPS) port->ump_group = 0; /* fill default port direction */ if (!port->direction) { if (info->capability & SNDRV_SEQ_PORT_CAP_READ) port->direction |= SNDRV_SEQ_PORT_DIR_INPUT; if (info->capability & SNDRV_SEQ_PORT_CAP_WRITE) port->direction |= SNDRV_SEQ_PORT_DIR_OUTPUT; } port->is_midi1 = !!(info->flags & SNDRV_SEQ_PORT_FLG_IS_MIDI1); return 0; } /* get port info fields */ int snd_seq_get_port_info(struct snd_seq_client_port * port, struct snd_seq_port_info * info) { if (snd_BUG_ON(!port || !info)) return -EINVAL; /* get port name */ strscpy(info->name, port->name, sizeof(info->name)); /* get capabilities */ info->capability = port->capability; /* get port type */ info->type = port->type; /* information about supported channels/voices */ info->midi_channels = port->midi_channels; info->midi_voices = port->midi_voices; info->synth_voices = port->synth_voices; /* get subscriber counts */ info->read_use = port->c_src.count; info->write_use = port->c_dest.count; /* timestamping */ info->flags = 0; if (port->timestamping) { info->flags |= SNDRV_SEQ_PORT_FLG_TIMESTAMP; if (port->time_real) info->flags |= SNDRV_SEQ_PORT_FLG_TIME_REAL; info->time_queue = port->time_queue; } if (port->is_midi1) info->flags |= SNDRV_SEQ_PORT_FLG_IS_MIDI1; /* UMP direction and group */ info->direction = port->direction; info->ump_group = port->ump_group; return 0; } /* * call callback functions (if any): * the callbacks are invoked only when the first (for connection) or * the last subscription (for disconnection) is done. Second or later * subscription results in increment of counter, but no callback is * invoked. 
* This feature is useful if these callbacks are associated with * initialization or termination of devices (see seq_midi.c). */ static int subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack) { int err = 0; if (!try_module_get(port->owner)) return -EFAULT; grp->count++; if (grp->open && grp->count == 1) { err = grp->open(port->private_data, info); if (err < 0) { module_put(port->owner); grp->count--; } } if (err >= 0 && send_ack && client->type == USER_CLIENT) snd_seq_client_notify_subscription(port->addr.client, port->addr.port, info, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED); return err; } static int unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack) { int err = 0; if (! grp->count) return -EINVAL; grp->count--; if (grp->close && grp->count == 0) err = grp->close(port->private_data, info); if (send_ack && client->type == USER_CLIENT) snd_seq_client_notify_subscription(port->addr.client, port->addr.port, info, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED); module_put(port->owner); return err; } /* check if both addresses are identical */ static inline int addr_match(struct snd_seq_addr *r, struct snd_seq_addr *s) { return (r->client == s->client) && (r->port == s->port); } /* check the two subscribe info match */ /* if flags is zero, checks only sender and destination addresses */ static int match_subs_info(struct snd_seq_port_subscribe *r, struct snd_seq_port_subscribe *s) { if (addr_match(&r->sender, &s->sender) && addr_match(&r->dest, &s->dest)) { if (r->flags && r->flags == s->flags) return r->queue == s->queue; else if (! r->flags) return 1; } return 0; } static int check_and_subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool exclusive, bool ack) { struct snd_seq_port_subs_info *grp; struct list_head *p; struct snd_seq_subscribers *s; int err; grp = is_src ? &port->c_src : &port->c_dest; guard(rwsem_write)(&grp->list_mutex); if (exclusive) { if (!list_empty(&grp->list_head)) return -EBUSY; } else { if (grp->exclusive) return -EBUSY; /* check whether already exists */ list_for_each(p, &grp->list_head) { s = get_subscriber(p, is_src); if (match_subs_info(&subs->info, &s->info)) return -EBUSY; } } err = subscribe_port(client, port, grp, &subs->info, ack); if (err < 0) { grp->exclusive = 0; return err; } /* add to list */ guard(write_lock_irq)(&grp->list_lock); if (is_src) list_add_tail(&subs->src_list, &grp->list_head); else list_add_tail(&subs->dest_list, &grp->list_head); grp->exclusive = exclusive; atomic_inc(&subs->ref_count); return 0; } /* called with grp->list_mutex held */ static void __delete_and_unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool ack) { struct snd_seq_port_subs_info *grp; struct list_head *list; bool empty; grp = is_src ? &port->c_src : &port->c_dest; list = is_src ? 
&subs->src_list : &subs->dest_list; scoped_guard(write_lock_irq, &grp->list_lock) { empty = list_empty(list); if (!empty) list_del_init(list); grp->exclusive = 0; } if (!empty) unsubscribe_port(client, port, grp, &subs->info, ack); } static void delete_and_unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool ack) { struct snd_seq_port_subs_info *grp; grp = is_src ? &port->c_src : &port->c_dest; guard(rwsem_write)(&grp->list_mutex); __delete_and_unsubscribe_port(client, port, subs, is_src, ack); } /* connect two ports */ int snd_seq_port_connect(struct snd_seq_client *connector, struct snd_seq_client *src_client, struct snd_seq_client_port *src_port, struct snd_seq_client *dest_client, struct snd_seq_client_port *dest_port, struct snd_seq_port_subscribe *info) { struct snd_seq_subscribers *subs; bool exclusive; int err; subs = kzalloc(sizeof(*subs), GFP_KERNEL); if (!subs) return -ENOMEM; subs->info = *info; atomic_set(&subs->ref_count, 0); INIT_LIST_HEAD(&subs->src_list); INIT_LIST_HEAD(&subs->dest_list); exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE); err = check_and_subscribe_port(src_client, src_port, subs, true, exclusive, connector->number != src_client->number); if (err < 0) goto error; err = check_and_subscribe_port(dest_client, dest_port, subs, false, exclusive, connector->number != dest_client->number); if (err < 0) goto error_dest; return 0; error_dest: delete_and_unsubscribe_port(src_client, src_port, subs, true, connector->number != src_client->number); error: kfree(subs); return err; } /* remove the connection */ int snd_seq_port_disconnect(struct snd_seq_client *connector, struct snd_seq_client *src_client, struct snd_seq_client_port *src_port, struct snd_seq_client *dest_client, struct snd_seq_client_port *dest_port, struct snd_seq_port_subscribe *info) { struct snd_seq_port_subs_info *dest = &dest_port->c_dest; struct snd_seq_subscribers *subs; int err = -ENOENT; /* always start from deleting the dest port for avoiding concurrent * deletions */ scoped_guard(rwsem_write, &dest->list_mutex) { /* look for the connection */ list_for_each_entry(subs, &dest->list_head, dest_list) { if (match_subs_info(info, &subs->info)) { __delete_and_unsubscribe_port(dest_client, dest_port, subs, false, connector->number != dest_client->number); err = 0; break; } } } if (err < 0) return err; delete_and_unsubscribe_port(src_client, src_port, subs, true, connector->number != src_client->number); kfree(subs); return 0; } /* get matched subscriber */ int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, struct snd_seq_addr *dest_addr, struct snd_seq_port_subscribe *subs) { struct snd_seq_subscribers *s; int err = -ENOENT; guard(rwsem_read)(&src_grp->list_mutex); list_for_each_entry(s, &src_grp->list_head, src_list) { if (addr_match(dest_addr, &s->info.dest)) { *subs = s->info; err = 0; break; } } return err; } /* * Attach a device driver that wants to receive events from the * sequencer. Returns the new port number on success. * A driver that wants to receive the events converted to midi, will * use snd_seq_midisynth_register_port(). */ /* exported */ int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp, int cap, int type, int midi_channels, int midi_voices, char *portname) { struct snd_seq_port_info portinfo; int ret; /* Set up the port */ memset(&portinfo, 0, sizeof(portinfo)); portinfo.addr.client = client; strscpy(portinfo.name, portname ? 
portname : "Unnamed port", sizeof(portinfo.name)); portinfo.capability = cap; portinfo.type = type; portinfo.kernel = pcbp; portinfo.midi_channels = midi_channels; portinfo.midi_voices = midi_voices; /* Create it */ ret = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, &portinfo); if (ret >= 0) ret = portinfo.addr.port; return ret; } EXPORT_SYMBOL(snd_seq_event_port_attach); /* * Detach the driver from a port. */ /* exported */ int snd_seq_event_port_detach(int client, int port) { struct snd_seq_port_info portinfo; int err; memset(&portinfo, 0, sizeof(portinfo)); portinfo.addr.client = client; portinfo.addr.port = port; err = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, &portinfo); return err; } EXPORT_SYMBOL(snd_seq_event_port_detach);
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H #include <linux/bug.h> #include <linux/list.h> #include <linux/signal_types.h> #include <linux/string.h> struct task_struct; /* for sysctl */ extern int print_fatal_signals; static inline void copy_siginfo(kernel_siginfo_t *to, const kernel_siginfo_t *from) { memcpy(to, from, sizeof(*to)); } static inline void clear_siginfo(kernel_siginfo_t *info) { memset(info, 0, sizeof(*info)); } #define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) static inline void copy_siginfo_to_external(siginfo_t *to, const kernel_siginfo_t *from) { memcpy(to, from, sizeof(*from)); memset(((char *)to) + sizeof(struct kernel_siginfo), 0, SI_EXPANSION_SIZE); } int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); enum siginfo_layout { SIL_KILL, SIL_TIMER, SIL_POLL, SIL_FAULT, SIL_FAULT_TRAPNO, SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, SIL_FAULT_PERF_EVENT, SIL_CHLD, SIL_RT, SIL_SYS, }; enum siginfo_layout siginfo_layout(unsigned sig, int si_code); /* * Define some primitives to manipulate sigset_t. */ #ifndef __HAVE_ARCH_SIG_BITOPS #include <linux/bitops.h> /* We don't use <linux/bitops.h> for these because there is no need to be atomic.
*/ static inline void sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if (_NSIG_WORDS == 1) set->sig[0] |= 1UL << sig; else set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW); } static inline void sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if (_NSIG_WORDS == 1) set->sig[0] &= ~(1UL << sig); else set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW)); } static inline int sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if (_NSIG_WORDS == 1) return 1 & (set->sig[0] >> sig); else return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } #endif /* __HAVE_ARCH_SIG_BITOPS */ static inline int sigisemptyset(sigset_t *set) { switch (_NSIG_WORDS) { case 4: return (set->sig[3] | set->sig[2] | set->sig[1] | set->sig[0]) == 0; case 2: return (set->sig[1] | set->sig[0]) == 0; case 1: return set->sig[0] == 0; default: BUILD_BUG(); return 0; } } static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) { switch (_NSIG_WORDS) { case 4: return (set1->sig[3] == set2->sig[3]) && (set1->sig[2] == set2->sig[2]) && (set1->sig[1] == set2->sig[1]) && (set1->sig[0] == set2->sig[0]); case 2: return (set1->sig[1] == set2->sig[1]) && (set1->sig[0] == set2->sig[0]); case 1: return set1->sig[0] == set2->sig[0]; } return 0; } #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS #define _SIG_SET_BINOP(name, op) \ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ { \ unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \ \ switch (_NSIG_WORDS) { \ case 4: \ a3 = a->sig[3]; a2 = a->sig[2]; \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ fallthrough; \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ fallthrough; \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ break; \ default: \ BUILD_BUG(); \ } \ } #define _sig_or(x,y) ((x) | (y)) _SIG_SET_BINOP(sigorsets, _sig_or) #define _sig_and(x,y) ((x) & (y)) _SIG_SET_BINOP(sigandsets, _sig_and) #define _sig_andn(x,y) ((x) & ~(y)) _SIG_SET_BINOP(sigandnsets, _sig_andn) #undef _SIG_SET_BINOP #undef _sig_or #undef _sig_and #undef _sig_andn #define _SIG_SET_OP(name, op) \ static inline void name(sigset_t *set) \ { \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ fallthrough; \ case 2: set->sig[1] = op(set->sig[1]); \ fallthrough; \ case 1: set->sig[0] = op(set->sig[0]); \ break; \ default: \ BUILD_BUG(); \ } \ } #define _sig_not(x) (~(x)) _SIG_SET_OP(signotset, _sig_not) #undef _SIG_SET_OP #undef _sig_not static inline void sigemptyset(sigset_t *set) { switch (_NSIG_WORDS) { default: memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; fallthrough; case 1: set->sig[0] = 0; break; } } static inline void sigfillset(sigset_t *set) { switch (_NSIG_WORDS) { default: memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; fallthrough; case 1: set->sig[0] = -1; break; } } /* Some extensions for manipulating the low 32 signals in particular. 
*/ static inline void sigaddsetmask(sigset_t *set, unsigned long mask) { set->sig[0] |= mask; } static inline void sigdelsetmask(sigset_t *set, unsigned long mask) { set->sig[0] &= ~mask; } static inline int sigtestsetmask(sigset_t *set, unsigned long mask) { return (set->sig[0] & mask) != 0; } static inline void siginitset(sigset_t *set, unsigned long mask) { set->sig[0] = mask; switch (_NSIG_WORDS) { default: memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = 0; break; case 1: ; } } static inline void siginitsetinv(sigset_t *set, unsigned long mask) { set->sig[0] = ~mask; switch (_NSIG_WORDS) { default: memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = -1; break; case 1: ; } } #endif /* __HAVE_ARCH_SIG_SETOPS */ static inline void init_sigpending(struct sigpending *sig) { sigemptyset(&sig->signal); INIT_LIST_HEAD(&sig->list); } extern void flush_sigqueue(struct sigpending *queue); /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) { return sig <= _NSIG ? 1 : 0; } struct timespec; struct pt_regs; enum pid_type; extern int next_signal(struct sigpending *pending, sigset_t *mask); extern int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int send_signal_locked(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; extern bool get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); extern void kernel_sigaction(int, __sighandler_t); #define SIG_KTHREAD ((__force __sighandler_t)2) #define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3) static inline void allow_signal(int sig) { /* * Kernel threads handle their own signals. Let the signal code * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ kernel_sigaction(sig, SIG_KTHREAD); } static inline void allow_kernel_signal(int sig) { /* * Kernel threads handle their own signals. Let the signal code * know signals sent by the kernel will be handled, so that they * don't get silently dropped. */ kernel_sigaction(sig, SIG_KTHREAD_KERNEL); } static inline void disallow_signal(int sig) { kernel_sigaction(sig, SIG_IGN); } extern struct kmem_cache *sighand_cachep; extern bool unhandled_signal(struct task_struct *tsk, int sig); /* * In POSIX a signal is sent either to a specific thread (Linux task) * or to the process as a whole (Linux thread group). How the signal * is sent determines whether it's to one thread or the whole group, * which determines which signal mask(s) are involved in blocking it * from being delivered until later. When the signal is delivered, * either it's caught or ignored by a user handler or it has a default * effect that applies to the whole thread group (POSIX process). * * The possible effects an unblocked signal set to SIG_DFL can have are: * ignore - Nothing Happens * terminate - kill the process, i.e. all threads in the group, * similar to exit_group. The group leader (only) reports * WIFSIGNALED status to its parent. 
* coredump - write a core dump file describing all threads using * the same mm and then kill all those threads * stop - stop all the threads in the group, i.e. TASK_STOPPED state * * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. * Other signals when not blocked and set to SIG_DFL behaves as follows. * The job control signals also have other special effects. * * +--------------------+------------------+ * | POSIX signal | default action | * +--------------------+------------------+ * | SIGHUP | terminate | * | SIGINT | terminate | * | SIGQUIT | coredump | * | SIGILL | coredump | * | SIGTRAP | coredump | * | SIGABRT/SIGIOT | coredump | * | SIGBUS | coredump | * | SIGFPE | coredump | * | SIGKILL | terminate(+) | * | SIGUSR1 | terminate | * | SIGSEGV | coredump | * | SIGUSR2 | terminate | * | SIGPIPE | terminate | * | SIGALRM | terminate | * | SIGTERM | terminate | * | SIGCHLD | ignore | * | SIGCONT | ignore(*) | * | SIGSTOP | stop(*)(+) | * | SIGTSTP | stop(*) | * | SIGTTIN | stop(*) | * | SIGTTOU | stop(*) | * | SIGURG | ignore | * | SIGXCPU | coredump | * | SIGXFSZ | coredump | * | SIGVTALRM | terminate | * | SIGPROF | terminate | * | SIGPOLL/SIGIO | terminate | * | SIGSYS/SIGUNUSED | coredump | * | SIGSTKFLT | terminate | * | SIGWINCH | ignore | * | SIGPWR | terminate | * | SIGRTMIN-SIGRTMAX | terminate | * +--------------------+------------------+ * | non-POSIX signal | default action | * +--------------------+------------------+ * | SIGEMT | coredump | * +--------------------+------------------+ * * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". * (*) Special job control effects: * When SIGCONT is sent, it resumes the process (all threads in the group) * from TASK_STOPPED state and also clears any pending/queued stop signals * (any of those marked with "stop(*)"). This happens regardless of blocking, * catching, or ignoring SIGCONT. When any stop signal is sent, it clears * any pending/queued SIGCONT signals; this happens regardless of blocking, * catching, or ignored the stop signal, though (except for SIGSTOP) the * default action of stopping the process may happen later or never. 
*/ #ifdef SIGEMT #define SIGEMT_MASK rt_sigmask(SIGEMT) #else #define SIGEMT_MASK 0 #endif #if SIGRTMIN > BITS_PER_LONG #define rt_sigmask(sig) (1ULL << ((sig)-1)) #else #define rt_sigmask(sig) sigmask(sig) #endif #define siginmask(sig, mask) \ ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) #define SIG_KERNEL_STOP_MASK (\ rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \ rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) ) #define SIG_KERNEL_COREDUMP_MASK (\ rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \ rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \ rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \ rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \ rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \ SIGEMT_MASK ) #define SIG_KERNEL_IGNORE_MASK (\ rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) #define SIG_SPECIFIC_SICODES_MASK (\ rt_sigmask(SIGILL) | rt_sigmask(SIGFPE) | \ rt_sigmask(SIGSEGV) | rt_sigmask(SIGBUS) | \ rt_sigmask(SIGTRAP) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGPOLL) | rt_sigmask(SIGSYS) | \ SIGEMT_MASK ) #define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) #define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) #define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) #define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK) #define sig_specific_sicodes(sig) siginmask(sig, SIG_SPECIFIC_SICODES_MASK) #define sig_fatal(t, signr) \ (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) void signals_init(void); int restore_altstack(const stack_t __user *); int __save_altstack(stack_t __user *, unsigned long); #define unsafe_save_altstack(uss, sp, label) do { \ stack_t __user *__uss = uss; \ struct task_struct *t = current; \ unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ } while (0); #ifdef CONFIG_DYNAMIC_SIGFRAME bool sigaltstack_size_valid(size_t ss_size); #else static inline bool sigaltstack_size_valid(size_t size) { return true; } #endif /* !CONFIG_DYNAMIC_SIGFRAME */ #ifdef CONFIG_PROC_FS struct seq_file; extern void render_sigset_t(struct seq_file *, const char *, sigset_t *); #endif #ifndef arch_untagged_si_addr /* * Given a fault address and a signal and si_code which correspond to the * _sigfault union member, returns the address that must appear in si_addr if * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags. */ static inline void __user *arch_untagged_si_addr(void __user *addr, unsigned long sig, unsigned long si_code) { return addr; } #endif #endif /* _LINUX_SIGNAL_H */
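/*
 * Illustrative sketch (not part of the header above): the typical way a
 * kernel thread uses the allow_signal() helper declared in this header so
 * that a chosen signal is queued for it instead of being silently dropped.
 * The worker function name, the one-second poll and the choice of SIGTERM
 * are invented for the example.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

static int my_worker_fn(void *data)
{
	/* Opt in: SIGTERM is now delivered to this kthread instead of dropped. */
	allow_signal(SIGTERM);

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			/* Someone sent SIGTERM; acknowledge it and wind down. */
			flush_signals(current);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}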
// SPDX-License-Identifier: GPL-2.0 /* * message.c - synchronous message handling * * Released under the GPLv2 only. */ #include <linux/acpi.h> #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/nls.h> #include <linux/device.h> #include <linux/scatterlist.h> #include <linux/usb/cdc.h> #include <linux/usb/quirks.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/usb/of.h> #include <asm/byteorder.h> #include "usb.h" static void cancel_async_set_config(struct usb_device *udev); struct api_context { struct completion done; int status; }; static void usb_api_blocking_completion(struct urb *urb) { struct api_context *ctx = urb->context; ctx->status = urb->status; complete(&ctx->done); } /* * Starts urb and waits for completion or timeout. Note that this call * is NOT interruptible. Many device driver i/o requests should be * interruptible and therefore these drivers should implement their * own interruptible routines. */ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) { struct api_context ctx; unsigned long expire; int retval; init_completion(&ctx.done); urb->context = &ctx; urb->actual_length = 0; retval = usb_submit_urb(urb, GFP_NOIO); if (unlikely(retval)) goto out; expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; if (!wait_for_completion_timeout(&ctx.done, expire)) { usb_kill_urb(urb); retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); dev_dbg(&urb->dev->dev, "%s timed out on ep%d%s len=%u/%u\n", current->comm, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ?
"in" : "out", urb->actual_length, urb->transfer_buffer_length); } else retval = ctx.status; out: if (actual_length) *actual_length = urb->actual_length; usb_free_urb(urb); return retval; } /*-------------------------------------------------------------------*/ /* returns status (negative) or length (positive) */ static int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout) { struct urb *urb; int retv; int length; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) return -ENOMEM; usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, len, usb_api_blocking_completion, NULL); retv = usb_start_wait_urb(urb, timeout, &length); if (retv < 0) return retv; else return length; } /** * usb_control_msg - Builds a control urb, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple control message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: If successful, the number of bytes transferred. Otherwise, a negative * error number. */ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int ret; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) return -ENOMEM; dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); kfree(dr); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg); /** * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is not * expected to fill in a response (i.e. a "send message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. 
If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer can be made to a reference on the stack, or anywhere else, * as it will not be modified at all. This does not have the restriction that * usb_control_msg() has where the data pointer must be to dynamically allocated * memory (i.e. memory that can be successfully DMAed to a device). * * Return: If successful, 0 is returned, Otherwise, a negative error number. */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, const void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_sndctrlpipe(dev, endpoint); int ret; u8 *data = NULL; if (size) { data = kmemdup(driver_data, size, memflags); if (!data) return -ENOMEM; } ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); kfree(data); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL_GPL(usb_control_msg_send); /** * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to be filled in by the message * @size: length in bytes of the data to be received * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is * expected to fill in a response (i.e. a "receive message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer can be made to a reference on the stack, or anywhere else * that can be successfully written to. This function does not have the * restriction that usb_control_msg() has where the data pointer must be to * dynamically allocated memory (i.e. memory that can be successfully DMAed to a * device). * * The "whole" message must be properly received from the device in order for * this function to be successful. If a device returns less than the expected * amount of data, then the function will fail. Do not use this for messages * where a variable amount of data might be returned. * * Return: If successful, 0 is returned, Otherwise, a negative error number. 
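 *
 * A minimal illustrative sketch of the send/recv helpers documented here
 * and above (not taken from a real driver; the vendor request number 0x01,
 * the register value 1 and the 2-byte reply are invented for the example).
 * Unlike usb_control_msg(), the reply buffer may live on the stack because
 * the helper bounce-buffers it internally:
 *
 *	u8 reply[2];
 *	int ret;
 *
 *	ret = usb_control_msg_send(udev, 0, 0x01,
 *				   USB_DIR_OUT | USB_TYPE_VENDOR |
 *				   USB_RECIP_DEVICE,
 *				   1, 0, NULL, 0,
 *				   USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 *	ret = usb_control_msg_recv(udev, 0, 0x01,
 *				   USB_DIR_IN | USB_TYPE_VENDOR |
 *				   USB_RECIP_DEVICE,
 *				   0, 0, reply, sizeof(reply),
 *				   USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 * A return of 0 from the recv variant means the whole 2-byte reply arrived.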
*/ int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_rcvctrlpipe(dev, endpoint); int ret; u8 *data; if (!size || !driver_data) return -EINVAL; data = kmalloc(size, memflags); if (!data) return -ENOMEM; ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); if (ret < 0) goto exit; if (ret == size) { memcpy(driver_data, data, size); ret = 0; } else { ret = -EREMOTEIO; } exit: kfree(data); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg_recv); /** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple interrupt message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb() If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. */ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout); } EXPORT_SYMBOL_GPL(usb_interrupt_msg); /** * usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple bulk message to a specified endpoint * and waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb() If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Because there is no usb_interrupt_msg() and no USBDEVFS_INTERRUPT ioctl, * users are forced to abuse this routine by using it to submit URBs for * interrupt endpoints. We will take the liberty of creating an interrupt URB * (with the default interval) if the target is an interrupt endpoint. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. 
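 *
 * A minimal illustrative sketch (not from a real driver; the bulk-out
 * endpoint number 0x02, the 64-byte transfer and the 1000 ms timeout are
 * invented). The buffer must be DMA-able, i.e. kmalloc()ed rather than
 * placed on the stack, and is assumed to be filled by the caller:
 *
 *	u8 *buf = kmalloc(64, GFP_KERNEL);
 *	int actual = 0;
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x02),
 *			   buf, 64, &actual, 1000);
 *	if (ret)
 *		dev_err(&udev->dev, "bulk out failed: %d\n", ret);
 *	else
 *		dev_dbg(&udev->dev, "wrote %d of 64 bytes\n", actual);
 *	kfree(buf);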
* */ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { struct urb *urb; struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(usb_dev, pipe); if (!ep || len < 0) return -EINVAL; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL, ep->desc.bInterval); } else usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL); return usb_start_wait_urb(urb, timeout, actual_length); } EXPORT_SYMBOL_GPL(usb_bulk_msg); /*-------------------------------------------------------------------*/ static void sg_clean(struct usb_sg_request *io) { if (io->urbs) { while (io->entries--) usb_free_urb(io->urbs[io->entries]); kfree(io->urbs); io->urbs = NULL; } io->dev = NULL; } static void sg_complete(struct urb *urb) { unsigned long flags; struct usb_sg_request *io = urb->context; int status = urb->status; spin_lock_irqsave(&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets * device driver code (like this routine) unlink queued urbs first, * if it needs to, since the HC won't work on them at all. So it's * not possible for page N+1 to overwrite page N, and so on. * * That's only for "hard" faults; "soft" faults (unlinks) sometimes * complete before the HCD can get requests away from hardware, * though never during cleanup after a hard fault. */ if (io->status && (io->status != -ECONNRESET || status != -ECONNRESET) && urb->actual_length) { dev_err(io->dev->bus->controller, "dev %s ep%d%s scatterlist error %d/%d\n", io->dev->devpath, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? "in" : "out", status, io->status); /* BUG (); */ } if (io->status == 0 && status && status != -ECONNRESET) { int i, found, retval; io->status = status; /* the previous urbs, and this one, completed already. * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ spin_unlock_irqrestore(&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs[i]) continue; if (found) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } else if (urb == io->urbs[i]) found = 1; } spin_lock_irqsave(&io->lock, flags); } /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } /** * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request * @io: request block being initialized. until usb_sg_wait() returns, * treat this as a pointer to an opaque block of memory, * @dev: the usb device that will send or receive the data * @pipe: endpoint "pipe" used to transfer the data * @period: polling rate for interrupt endpoints, in frames or * (for high speed endpoints) microframes; ignored for bulk * @sg: scatterlist entries * @nents: how many entries in the scatterlist * @length: how many bytes to send from the scatterlist, or zero to * send every byte identified in the list. 
* @mem_flags: SLAB_* flags affecting memory allocations in this call * * This initializes a scatter/gather request, allocating resources such as * I/O mappings and urb memory (except maybe memory used by USB controller * drivers). * * The request must be issued using usb_sg_wait(), which waits for the I/O to * complete (or to be canceled) and then cleans up all resources allocated by * usb_sg_init(). * * The request may be canceled with usb_sg_cancel(), either before or after * usb_sg_wait() is called. * * Return: Zero for success, else a negative errno value. */ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, unsigned pipe, unsigned period, struct scatterlist *sg, int nents, size_t length, gfp_t mem_flags) { int i; int urb_flags; int use_sg; if (!io || !dev || !sg || usb_pipecontrol(pipe) || usb_pipeisoc(pipe) || nents <= 0) return -EINVAL; spin_lock_init(&io->lock); io->dev = dev; io->pipe = pipe; if (dev->bus->sg_tablesize > 0) { use_sg = true; io->entries = 1; } else { use_sg = false; io->entries = nents; } /* initialize all the urbs we'll use */ io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags); if (!io->urbs) goto nomem; urb_flags = URB_NO_INTERRUPT; if (usb_pipein(pipe)) urb_flags |= URB_SHORT_NOT_OK; for_each_sg(sg, sg, io->entries, i) { struct urb *urb; unsigned len; urb = usb_alloc_urb(0, mem_flags); if (!urb) { io->entries = i; goto nomem; } io->urbs[i] = urb; urb->dev = NULL; urb->pipe = pipe; urb->interval = period; urb->transfer_flags = urb_flags; urb->complete = sg_complete; urb->context = io; urb->sg = sg; if (use_sg) { /* There is no single transfer buffer */ urb->transfer_buffer = NULL; urb->num_sgs = nents; /* A length of zero means transfer the whole sg list */ len = length; if (len == 0) { struct scatterlist *sg2; int j; for_each_sg(sg, sg2, nents, j) len += sg2->length; } } else { /* * Some systems can't use DMA; they use PIO instead. * For their sakes, transfer_buffer is set whenever * possible. */ if (!PageHighMem(sg_page(sg))) urb->transfer_buffer = sg_virt(sg); else urb->transfer_buffer = NULL; len = sg->length; if (length) { len = min_t(size_t, len, length); length -= len; if (length == 0) io->entries = i + 1; } } urb->transfer_buffer_length = len; } io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; /* transaction state */ io->count = io->entries; io->status = 0; io->bytes = 0; init_completion(&io->complete); return 0; nomem: sg_clean(io); return -ENOMEM; } EXPORT_SYMBOL_GPL(usb_sg_init); /** * usb_sg_wait - synchronously execute scatter/gather request * @io: request block handle, as initialized with usb_sg_init(). * some fields become accessible when this call returns. * * Context: task context, might sleep. * * This function blocks until the specified I/O operation completes. It * leverages the grouping of the related I/O requests to get good transfer * rates, by queueing the requests. At higher speeds, such queuing can * significantly improve USB throughput. * * There are three kinds of completion for this function. * * (1) success, where io->status is zero. The number of io->bytes * transferred is as requested. * (2) error, where io->status is a negative errno value. The number * of io->bytes transferred before the error is usually less * than requested, and can be nonzero. * (3) cancellation, a type of error with status -ECONNRESET that * is initiated by usb_sg_cancel(). * * When this function returns, all memory allocated through usb_sg_init() or * this call will have been freed. 
The request block parameter may still be * passed to usb_sg_cancel(), or it may be freed. It could also be * reinitialized and then reused. * * Data Transfer Rates: * * Bulk transfers are valid for full or high speed endpoints. * The best full speed data rate is 19 packets of 64 bytes each * per frame, or 1216 bytes per millisecond. * The best high speed data rate is 13 packets of 512 bytes each * per microframe, or 52 KBytes per millisecond. * * The reason to use interrupt transfers through this API would most likely * be to reserve high speed bandwidth, where up to 24 KBytes per millisecond * could be transferred. That capability is less useful for low or full * speed interrupt endpoints, which allow at most one packet per millisecond, * of at most 8 or 64 bytes (respectively). * * It is not necessary to call this function to reserve bandwidth for devices * under an xHCI host controller, as the bandwidth is reserved when the * configuration or interface alt setting is selected. */ void usb_sg_wait(struct usb_sg_request *io) { int i; int entries = io->entries; /* queue the urbs. */ spin_lock_irq(&io->lock); i = 0; while (i < entries && !io->status) { int retval; io->urbs[i]->dev = io->dev; spin_unlock_irq(&io->lock); retval = usb_submit_urb(io->urbs[i], GFP_NOIO); switch (retval) { /* maybe we retrying will recover */ case -ENXIO: /* hc didn't queue this one */ case -EAGAIN: case -ENOMEM: retval = 0; yield(); break; /* no error? continue immediately. * * NOTE: to work better with UHCI (4K I/O buffer may * need 3K of TDs) it may be good to limit how many * URBs are queued at once; N milliseconds? */ case 0: ++i; cpu_relax(); break; /* fail any uncompleted urbs */ default: io->urbs[i]->status = retval; dev_dbg(&io->dev->dev, "%s, submit --> %d\n", __func__, retval); usb_sg_cancel(io); } spin_lock_irq(&io->lock); if (retval && (io->status == 0 || io->status == -ECONNRESET)) io->status = retval; } io->count -= entries - i; if (io->count == 0) complete(&io->complete); spin_unlock_irq(&io->lock); /* OK, yes, this could be packaged as non-blocking. * So could the submit loop above ... but it's easier to * solve neither problem than to solve both! */ wait_for_completion(&io->complete); sg_clean(io); } EXPORT_SYMBOL_GPL(usb_sg_wait); /** * usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait() * @io: request block, initialized with usb_sg_init() * * This stops a request after it has been started by usb_sg_wait(). * It can also prevents one initialized by usb_sg_init() from starting, * so that call just frees resources allocated to the request. 
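 *
 * usb_sg_cancel() can interrupt, at any point, the usual life cycle driven
 * by usb_sg_init() and usb_sg_wait(), sketched below for reference
 * (illustrative only; "sgtable", "nents" and the bulk-in endpoint 0x81 are
 * assumed to have been set up by the caller, e.g. with sg_init_table()):
 *
 *	struct usb_sg_request io;
 *	int ret;
 *
 *	ret = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 0x81), 0,
 *			  sgtable, nents, 0, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	usb_sg_wait(&io);
 *	if (io.status)
 *		return io.status;
 *	dev_dbg(&udev->dev, "transferred %zu bytes\n", io.bytes);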
*/ void usb_sg_cancel(struct usb_sg_request *io) { unsigned long flags; int i, retval; spin_lock_irqsave(&io->lock, flags); if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } spin_lock_irqsave(&io->lock, flags); io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); /*-------------------------------------------------------------------*/ /** * usb_get_descriptor - issues a generic GET_DESCRIPTOR request * @dev: the device whose descriptor is being retrieved * @type: the descriptor type (USB_DT_*) * @index: the number of the descriptor * @buf: where to put the descriptor * @size: how big is "buf"? * * Context: task context, might sleep. * * Gets a USB descriptor. Convenience functions exist to simplify * getting some types of descriptors. Use * usb_get_string() or usb_string() for USB_DT_STRING. * Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG) * are part of the device structure. * In addition to a number of USB-standard descriptors, some * devices also use class-specific or vendor-specific descriptors. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. */ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; memset(buf, 0, size); /* Make sure we parse really received data */ for (i = 0; i < 3; ++i) { /* retry on length 0 or error; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (type << 8) + index, 0, buf, size, USB_CTRL_GET_TIMEOUT); if (result <= 0 && result != -ETIMEDOUT) continue; if (result > 1 && ((u8 *)buf)[1] != type) { result = -ENODATA; continue; } break; } return result; } EXPORT_SYMBOL_GPL(usb_get_descriptor); /** * usb_get_string - gets a string descriptor * @dev: the device whose string descriptor is being retrieved * @langid: code for language chosen (from string descriptor zero) * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character, * in little-endian byte order). * The usb_string() function will often be a convenient way to turn * these strings into kernel-printable form. * * Strings may be referenced in device, configuration, interface, or other * descriptors, and could also be used in vendor-specific ways. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. 
*/ static int usb_get_string(struct usb_device *dev, unsigned short langid, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; for (i = 0; i < 3; ++i) { /* retry on length 0 or stall; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (USB_DT_STRING << 8) + index, langid, buf, size, USB_CTRL_GET_TIMEOUT); if (result == 0 || result == -EPIPE) continue; if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) { result = -ENODATA; continue; } break; } return result; } static void usb_try_string_workarounds(unsigned char *buf, int *length) { int newlength, oldlength = *length; for (newlength = 2; newlength + 1 < oldlength; newlength += 2) if (!isprint(buf[newlength]) || buf[newlength + 1]) break; if (newlength > 2) { buf[0] = newlength; *length = newlength; } } static int usb_string_sub(struct usb_device *dev, unsigned int langid, unsigned int index, unsigned char *buf) { int rc; /* Try to read the string descriptor by asking for the maximum * possible number of bytes */ if (dev->quirks & USB_QUIRK_STRING_FETCH_255) rc = -EIO; else rc = usb_get_string(dev, langid, index, buf, 255); /* If that failed try to read the descriptor length, then * ask for just that many bytes */ if (rc < 2) { rc = usb_get_string(dev, langid, index, buf, 2); if (rc == 2) rc = usb_get_string(dev, langid, index, buf, buf[0]); } if (rc >= 2) { if (!buf[0] && !buf[1]) usb_try_string_workarounds(buf, &rc); /* There might be extra junk at the end of the descriptor */ if (buf[0] < rc) rc = buf[0]; rc = rc - (rc & 1); /* force a multiple of two */ } if (rc < 2) rc = (rc < 0 ? rc : -EINVAL); return rc; } static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf) { int err; if (dev->have_langid) return 0; if (dev->string_langid < 0) return -EPIPE; err = usb_string_sub(dev, 0, 0, tbuf); /* If the string was reported but is malformed, default to english * (0x0409) */ if (err == -ENODATA || (err > 0 && err < 4)) { dev->string_langid = 0x0409; dev->have_langid = 1; dev_err(&dev->dev, "language id specifier not provided by device, defaulting to English\n"); return 0; } /* In case of all other errors, we assume the device is not able to * deal with strings at all. Set string_langid to -1 in order to * prevent any string to be retrieved from the device */ if (err < 0) { dev_info(&dev->dev, "string descriptor 0 read error: %d\n", err); dev->string_langid = -1; return -EPIPE; } /* always use the first langid listed */ dev->string_langid = tbuf[2] | (tbuf[3] << 8); dev->have_langid = 1; dev_dbg(&dev->dev, "default language 0x%04x\n", dev->string_langid); return 0; } /** * usb_string - returns UTF-8 version of a string descriptor * @dev: the device whose string descriptor is being retrieved * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * This converts the UTF-16LE encoded strings returned by devices, from * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones * that are more usable in most kernel contexts. Note that this function * chooses strings in the first language supported by the device. * * This call is synchronous, and may not be used in an interrupt context. * * Return: length of the string (>= 0) or usb_control_msg status (< 0). 
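 *
 * A minimal illustrative sketch (the 64-byte buffer and the use of the
 * product string index are invented for the example):
 *
 *	char product[64];
 *	int len;
 *
 *	len = usb_string(udev, udev->descriptor.iProduct,
 *			 product, sizeof(product));
 *	if (len > 0)
 *		dev_info(&udev->dev, "product: %s\n", product);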
*/ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) { unsigned char *tbuf; int err; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (size <= 0 || !buf) return -EINVAL; buf[0] = 0; if (index <= 0 || index >= 256) return -EINVAL; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; err = usb_get_langid(dev, tbuf); if (err < 0) goto errout; err = usb_string_sub(dev, dev->string_langid, index, tbuf); if (err < 0) goto errout; size--; /* leave room for trailing NULL char in output buffer */ err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2, UTF16_LITTLE_ENDIAN, buf, size); buf[err] = 0; if (tbuf[1] != USB_DT_STRING) dev_dbg(&dev->dev, "wrong descriptor type %02x for string %d (\"%s\")\n", tbuf[1], index, buf); errout: kfree(tbuf); return err; } EXPORT_SYMBOL_GPL(usb_string); /* one UTF-8-encoded 16-bit character has at most three bytes */ #define MAX_USB_STRING_SIZE (127 * 3 + 1) /** * usb_cache_string - read a string descriptor and cache it for later use * @udev: the device whose string descriptor is being read * @index: the descriptor index * * Return: A pointer to a kmalloc'ed buffer containing the descriptor string, * or %NULL if the index is 0 or the string could not be read. */ char *usb_cache_string(struct usb_device *udev, int index) { char *buf; char *smallbuf = NULL; int len; if (index <= 0) return NULL; buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); if (buf) { len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); if (len > 0) { smallbuf = kmalloc(++len, GFP_NOIO); if (!smallbuf) return buf; memcpy(smallbuf, buf, len); } kfree(buf); } return smallbuf; } EXPORT_SYMBOL_GPL(usb_cache_string); /* * usb_get_device_descriptor - read the device descriptor * @udev: the device whose device descriptor should be read * * Context: task context, might sleep. * * Not exported, only for use by the core. If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * * Returns: a pointer to a dynamically allocated usb_device_descriptor * structure (which the caller must deallocate), or an ERR_PTR value. */ struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev) { struct usb_device_descriptor *desc; int ret; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) return ERR_PTR(-ENOMEM); ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc)); if (ret == sizeof(*desc)) return desc; if (ret >= 0) ret = -EMSGSIZE; kfree(desc); return ERR_PTR(ret); } /* * usb_set_isoch_delay - informs the device of the packet transmit delay * @dev: the device whose delay is to be informed * Context: task context, might sleep * * Since this is an optional request, we don't bother if it fails. */ int usb_set_isoch_delay(struct usb_device *dev) { /* skip hub devices */ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB) return 0; /* skip non-SS/non-SSP devices */ if (dev->speed < USB_SPEED_SUPER) return 0; return usb_control_msg_send(dev, 0, USB_REQ_SET_ISOCH_DELAY, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, dev->hub_delay, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); } /** * usb_get_status - issues a GET_STATUS call * @dev: the device whose status is being checked * @recip: USB_RECIP_*; for device, interface, or endpoint * @type: USB_STATUS_TYPE_*; for standard or PTM status types * @target: zero (for device), else interface or endpoint number * @data: pointer to two bytes of bitmap data * * Context: task context, might sleep. 
* * Returns device, interface, or endpoint status. Normally only of * interest to see if the device is self powered, or has enabled the * remote wakeup facility; or whether a bulk or interrupt endpoint * is halted ("stalled"). * * Bits in these status bitmaps are set using the SET_FEATURE request, * and cleared using the CLEAR_FEATURE request. The usb_clear_halt() * function should be used to clear halt ("stall") status. * * This call is synchronous, and may not be used in an interrupt context. * * Returns 0 and the status value in *@data (in host byte order) on success, * or else the status code from the underlying usb_control_msg() call. */ int usb_get_status(struct usb_device *dev, int recip, int type, int target, void *data) { int ret; void *status; int length; switch (type) { case USB_STATUS_TYPE_STANDARD: length = 2; break; case USB_STATUS_TYPE_PTM: if (recip != USB_RECIP_DEVICE) return -EINVAL; length = 4; break; default: return -EINVAL; } status = kmalloc(length, GFP_KERNEL); if (!status) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD, target, status, length, USB_CTRL_GET_TIMEOUT); switch (ret) { case 4: if (type != USB_STATUS_TYPE_PTM) { ret = -EIO; break; } *(u32 *) data = le32_to_cpu(*(__le32 *) status); ret = 0; break; case 2: if (type != USB_STATUS_TYPE_STANDARD) { ret = -EIO; break; } *(u16 *) data = le16_to_cpu(*(__le16 *) status); ret = 0; break; default: ret = -EIO; } kfree(status); return ret; } EXPORT_SYMBOL_GPL(usb_get_status); /** * usb_clear_halt - tells device to clear endpoint halt/stall condition * @dev: device whose endpoint is halted * @pipe: endpoint "pipe" being cleared * * Context: task context, might sleep. * * This is used to clear halt conditions for bulk and interrupt endpoints, * as reported by URB completion status. Endpoints that are halted are * sometimes referred to as being "stalled". Such endpoints are unable * to transmit or receive data until the halt status is cleared. Any URBs * queued for such an endpoint should normally be unlinked by the driver * before clearing the halt condition, as described in sections 5.7.5 * and 5.8.5 of the USB 2.0 spec. * * Note that control and isochronous endpoints don't halt, although control * endpoints report "protocol stall" (for unsupported requests) using the * same status code used to report a true stall. * * This call is synchronous, and may not be used in an interrupt context. * If a thread in your driver uses this call, make sure your disconnect() * method can wait for it to complete. * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_clear_halt(struct usb_device *dev, int pipe) { int result; int endp = usb_pipeendpoint(pipe); if (usb_pipein(pipe)) endp |= USB_DIR_IN; /* we don't care if it wasn't halted first. in fact some devices * (like some ibmcam model 1 units) seem to expect hosts to make * this request for iso endpoints, which can't halt! */ result = usb_control_msg_send(dev, 0, USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, USB_ENDPOINT_HALT, endp, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); /* don't un-halt or force to DATA0 except on success */ if (result) return result; /* NOTE: seems like Microsoft and Apple don't bother verifying * the clear "took", so some devices could lock up if you check... * such as the Hagiwara FlashGate DUAL. So we won't bother. 
* * NOTE: make sure the logic here doesn't diverge much from * the copy in usb-storage, for as long as we need two copies. */ usb_reset_endpoint(dev, endp); return 0; } EXPORT_SYMBOL_GPL(usb_clear_halt); static int create_intf_ep_devs(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt = intf->cur_altsetting; int i; if (intf->ep_devs_created || intf->unregistering) return 0; for (i = 0; i < alt->desc.bNumEndpoints; ++i) (void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev); intf->ep_devs_created = 1; return 0; } static void remove_intf_ep_devs(struct usb_interface *intf) { struct usb_host_interface *alt = intf->cur_altsetting; int i; if (!intf->ep_devs_created) return; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_remove_ep_devs(&alt->endpoint[i]); intf->ep_devs_created = 0; } /** * usb_disable_endpoint -- Disable an endpoint by address * @dev: the device whose endpoint is being disabled * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables the endpoint for URB submission and nukes all pending URBs. * If @reset_hardware is set then also deallocates hcd/hardware state * for the endpoint. */ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, bool reset_hardware) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (!dev) return; if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { ep->enabled = 0; usb_hcd_flush_endpoint(dev, ep); if (reset_hardware) usb_hcd_disable_endpoint(dev, ep); } } /** * usb_reset_endpoint - Reset an endpoint's state. * @dev: the device whose endpoint is to be reset * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * * Resets any host-side endpoint state such as the toggle bit, * sequence number or current window. */ void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (usb_endpoint_out(epaddr)) ep = dev->ep_out[epnum]; else ep = dev->ep_in[epnum]; if (ep) usb_hcd_reset_endpoint(dev, ep); } EXPORT_SYMBOL_GPL(usb_reset_endpoint); /** * usb_disable_interface -- Disable all endpoints for an interface * @dev: the device whose interface is being disabled * @intf: pointer to the interface descriptor * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables all the endpoints for the interface's current altsetting. */ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_hardware) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { usb_disable_endpoint(dev, alt->endpoint[i].desc.bEndpointAddress, reset_hardware); } } /* * usb_disable_device_endpoints -- Disable all endpoints for a device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. */ static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) { struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i; if (hcd->driver->check_bandwidth) { /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, false); usb_disable_endpoint(dev, i + USB_DIR_IN, false); } /* Remove endpoints from the host controller internal state */ mutex_lock(hcd->bandwidth_mutex); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); mutex_unlock(hcd->bandwidth_mutex); } /* Second pass: remove endpoint pointers */ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, true); usb_disable_endpoint(dev, i + USB_DIR_IN, true); } } /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. * * Disables all the device's endpoints, potentially including endpoint 0. * Deallocates hcd/hardware state for the endpoints (nuking all or most * pending urbs) and usbcore state for the interfaces, so that usbcore * must usb_set_configuration() before any interfaces could be used. */ void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) */ if (dev->actconfig) { /* * FIXME: In order to avoid self-deadlock involving the * bandwidth_mutex, we have to mark all the interfaces * before unregistering any of them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) dev->actconfig->interface[i]->unregistering = 1; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { struct usb_interface *interface; /* remove this interface if it has been registered */ interface = dev->actconfig->interface[i]; if (!device_is_registered(&interface->dev)) continue; dev_dbg(&dev->dev, "unregistering interface %s\n", dev_name(&interface->dev)); remove_intf_ep_devs(interface); device_del(&interface->dev); } /* Now that the interfaces are unbound, nobody should * try to access them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { put_device(&dev->actconfig->interface[i]->dev); dev->actconfig->interface[i] = NULL; } usb_disable_usb2_hardware_lpm(dev); usb_unlocked_disable_lpm(dev); usb_disable_ltm(dev); dev->actconfig = NULL; if (dev->state == USB_STATE_CONFIGURED) usb_set_device_state(dev, USB_STATE_ADDRESS); } dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); usb_disable_device_endpoints(dev, skip_ep0); } /** * usb_enable_endpoint - Enable an endpoint for USB communications * @dev: the device whose interface is being enabled * @ep: the endpoint * @reset_ep: flag to reset the endpoint state * * Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers. * For control endpoints, both the input and output sides are handled. */ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, bool reset_ep) { int epnum = usb_endpoint_num(&ep->desc); int is_out = usb_endpoint_dir_out(&ep->desc); int is_control = usb_endpoint_xfer_control(&ep->desc); if (reset_ep) usb_hcd_reset_endpoint(dev, ep); if (is_out || is_control) dev->ep_out[epnum] = ep; if (!is_out || is_control) dev->ep_in[epnum] = ep; ep->enabled = 1; } /** * usb_enable_interface - Enable all the endpoints for an interface * @dev: the device whose interface is being enabled * @intf: pointer to the interface descriptor * @reset_eps: flag to reset the endpoints' state * * Enables all the endpoints for the interface's current altsetting. 
*/ void usb_enable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_eps) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps); } /** * usb_set_interface - Makes a particular alternate setting be current * @dev: the device whose interface is being updated * @interface: the interface being updated * @alternate: the setting being chosen. * * Context: task context, might sleep. * * This is used to enable data transfers on interfaces that may not * be enabled by default. Not all devices support such configurability. * Only the driver bound to an interface may change its setting. * * Within any given configuration, each interface may have several * alternative settings. These are often used to control levels of * bandwidth consumption. For example, the default setting for a high * speed interrupt endpoint may not send more than 64 bytes per microframe, * while interrupt transfers of up to 3KBytes per microframe are legal. * Also, isochronous endpoints may never be part of an * interface's default setting. To access such bandwidth, alternate * interface settings must be made current. * * Note that in the Linux USB subsystem, bandwidth associated with * an endpoint in a given alternate setting is not reserved until an URB * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. * * xHCI reserves bandwidth and configures the alternate setting in * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting * may be disabled. Drivers cannot rely on any particular alternate * setting being in effect after a failure. * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed * (perhaps forced by unlinking). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_set_interface(struct usb_device *dev, int interface, int alternate) { struct usb_interface *iface; struct usb_host_interface *alt; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i, ret, manual = 0; unsigned int epaddr; unsigned int pipe; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; iface = usb_ifnum_to_if(dev, interface); if (!iface) { dev_dbg(&dev->dev, "selecting invalid interface %d\n", interface); return -EINVAL; } if (iface->unregistering) return -ENODEV; alt = usb_altnum_to_altsetting(iface, alternate); if (!alt) { dev_warn(&dev->dev, "selecting invalid altsetting %d\n", alternate); return -EINVAL; } /* * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, * including freeing dropped endpoint ring buffers. * Make sure the interface endpoints are flushed before that */ usb_disable_interface(dev, iface, false); /* Make sure we have enough bandwidth for this alternate interface. * Remove the current alt setting and add the new alt setting. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new alt setting is installed, * so that the xHCI driver can recalculate the U1/U2 timeouts. 
*/ if (usb_disable_lpm(dev)) { dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* Changing alt-setting also frees any allocated streams */ for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++) iface->cur_altsetting->endpoint[i].streams = 0; ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt); if (ret < 0) { dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n", alternate); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } if (dev->quirks & USB_QUIRK_NO_SET_INTF) ret = -EPIPE; else ret = usb_control_msg_send(dev, 0, USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE, alternate, interface, NULL, 0, 5000, GFP_NOIO); /* 9.4.10 says devices don't need this and are free to STALL the * request if the interface only has one alternate setting. */ if (ret == -EPIPE && iface->num_altsetting == 1) { dev_dbg(&dev->dev, "manual set_interface for iface %d, alt %d\n", interface, alternate); manual = 1; } else if (ret) { /* Re-instate the old alt setting */ usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } mutex_unlock(hcd->bandwidth_mutex); /* FIXME drivers shouldn't need to replicate/bugfix the logic here * when they implement async or easily-killable versions of this or * other "should-be-internal" functions (like clear_halt). * should hcd+usbcore postprocess control requests? */ /* prevent submissions using previous endpoint settings */ if (iface->cur_altsetting != alt) { remove_intf_ep_devs(iface); usb_remove_sysfs_intf_files(iface); } usb_disable_interface(dev, iface, true); iface->cur_altsetting = alt; /* Now that the interface is installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* If the interface only has one altsetting and the device didn't * accept the request, we attempt to carry out the equivalent action * by manually clearing the HALT feature for each endpoint in the * new altsetting. */ if (manual) { for (i = 0; i < alt->desc.bNumEndpoints; i++) { epaddr = alt->endpoint[i].desc.bEndpointAddress; pipe = __create_pipe(dev, USB_ENDPOINT_NUMBER_MASK & epaddr) | (usb_endpoint_out(epaddr) ? USB_DIR_OUT : USB_DIR_IN); usb_clear_halt(dev, pipe); } } /* 9.1.1.5: reset toggles for all endpoints in the new altsetting * * Note: * Despite EP0 is always present in all interfaces/AS, the list of * endpoints from the descriptor does not contain EP0. Due to its * omnipresence one might expect EP0 being considered "affected" by * any SetInterface request and hence assume toggles need to be reset. * However, EP0 toggles are re-synced for every individual transfer * during the SETUP stage - hence EP0 toggles are "don't care" here. * (Likewise, EP0 never "halts" on well designed devices.) */ usb_enable_interface(dev, iface, true); if (device_is_registered(&iface->dev)) { usb_create_sysfs_intf_files(iface); create_intf_ep_devs(iface); } return 0; } EXPORT_SYMBOL_GPL(usb_set_interface); /** * usb_reset_configuration - lightweight device reset * @dev: the device whose configuration is being reset * * This issues a standard SET_CONFIGURATION request to the device using * the current configuration. The effect is to reset most USB-related * state in the device, including interface altsettings (reset to zero), * endpoint halts (cleared), and endpoint state (only for bulk and interrupt * endpoints). Other usbcore state is unchanged, including bindings of * usb device drivers to interfaces. 
* * Because this affects multiple interfaces, avoid using this with composite * (multi-interface) devices. Instead, the driver for each interface may * use usb_set_interface() on the interfaces it claims. Be careful though; * some devices don't support the SET_INTERFACE request, and others won't * reset all the interface state (notably endpoint state). Resetting the whole * configuration would affect other drivers' interfaces. * * The caller must own the device lock. * * Return: Zero on success, else a negative error code. * * If this routine fails the device will probably be in an unusable state * with endpoints disabled, and interfaces only partially enabled. */ int usb_reset_configuration(struct usb_device *dev) { int i, retval; struct usb_host_config *config; struct usb_hcd *hcd = bus_to_hcd(dev->bus); if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; /* caller must have locked the device and must own * the usb bus readlock (so driver bindings are stable); * calls during probe() are fine */ usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the configuration is reset, so * that the xHCI driver can recalculate the U1/U2 timeouts. */ if (usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (retval) { usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. */ if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) { remove_intf_ep_devs(intf); usb_remove_sysfs_intf_files(intf); } intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); if (device_is_registered(&intf->dev)) { usb_create_sysfs_intf_files(intf); create_intf_ep_devs(intf); } } /* Now that the interfaces are installed, re-enable LPM. 
*/ usb_unlocked_enable_lpm(dev); return 0; } EXPORT_SYMBOL_GPL(usb_reset_configuration); static void usb_release_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_interface_cache *intfc = altsetting_to_usb_interface_cache(intf->altsetting); kref_put(&intfc->ref, usb_release_interface_cache); usb_put_dev(interface_to_usbdev(intf)); of_node_put(dev->of_node); kfree(intf); } /* * usb_deauthorize_interface - deauthorize an USB interface * * @intf: USB interface structure */ void usb_deauthorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; device_lock(dev->parent); if (intf->authorized) { device_lock(dev); intf->authorized = 0; device_unlock(dev); usb_forced_unbind_intf(intf); } device_unlock(dev->parent); } /* * usb_authorize_interface - authorize an USB interface * * @intf: USB interface structure */ void usb_authorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; if (!intf->authorized) { device_lock(dev); intf->authorized = 1; /* authorize interface */ device_unlock(dev); } } static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct usb_device *usb_dev; const struct usb_interface *intf; const struct usb_host_interface *alt; intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); alt = intf->cur_altsetting; if (add_uevent_var(env, "INTERFACE=%d/%d/%d", alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=usb:" "v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X", le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct), le16_to_cpu(usb_dev->descriptor.bcdDevice), usb_dev->descriptor.bDeviceClass, usb_dev->descriptor.bDeviceSubClass, usb_dev->descriptor.bDeviceProtocol, alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol, alt->desc.bInterfaceNumber)) return -ENOMEM; return 0; } const struct device_type usb_if_device_type = { .name = "usb_interface", .release = usb_release_interface, .uevent = usb_if_uevent, }; static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev, struct usb_host_config *config, u8 inum) { struct usb_interface_assoc_descriptor *retval = NULL; struct usb_interface_assoc_descriptor *intf_assoc; int first_intf; int last_intf; int i; for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) { intf_assoc = config->intf_assoc[i]; if (intf_assoc->bInterfaceCount == 0) continue; first_intf = intf_assoc->bFirstInterface; last_intf = first_intf + (intf_assoc->bInterfaceCount - 1); if (inum >= first_intf && inum <= last_intf) { if (!retval) retval = intf_assoc; else dev_err(&dev->dev, "Interface #%d referenced" " by multiple IADs\n", inum); } } return retval; } /* * Internal function to queue a device reset * See usb_queue_reset_device() for more details */ static void __usb_queue_reset_device(struct work_struct *ws) { int rc; struct usb_interface *iface = container_of(ws, struct usb_interface, reset_ws); struct usb_device *udev = interface_to_usbdev(iface); rc = usb_lock_device_for_reset(udev, iface); if (rc >= 0) { usb_reset_device(udev); usb_unlock_device(udev); } usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */ } /* * Internal function to set the wireless_status sysfs attribute * See usb_set_wireless_status() for more details */ static void __usb_wireless_status_intf(struct work_struct *ws) { struct usb_interface *iface = container_of(ws, 
struct usb_interface, wireless_status_work); device_lock(iface->dev.parent); if (iface->sysfs_files_created) usb_update_wireless_status_attr(iface); device_unlock(iface->dev.parent); usb_put_intf(iface); /* Undo _get_ in usb_set_wireless_status() */ } /** * usb_set_wireless_status - sets the wireless_status struct member * @iface: the interface to modify * @status: the new wireless status * * Set the wireless_status struct member to the new value, and emit * sysfs changes as necessary. * * Returns: 0 on success, -EALREADY if already set. */ int usb_set_wireless_status(struct usb_interface *iface, enum usb_wireless_status status) { if (iface->wireless_status == status) return -EALREADY; usb_get_intf(iface); iface->wireless_status = status; schedule_work(&iface->wireless_status_work); return 0; } EXPORT_SYMBOL_GPL(usb_set_wireless_status); /* * usb_set_configuration - Makes a particular device setting be current * @dev: the device whose configuration is being updated * @configuration: the configuration being chosen. * * Context: task context, might sleep. Caller holds device lock. * * This is used to enable non-default device modes. Not all devices * use this kind of configurability; many devices only have one * configuration. * * @configuration is the value of the configuration to be installed. * According to the USB spec (e.g. section 9.1.1.5), configuration values * must be non-zero; a value of zero indicates that the device is * unconfigured. However some devices erroneously use 0 as one of their * configuration values. To help manage such devices, this routine will * accept @configuration = -1 as indicating the device should be put in * an unconfigured state. * * USB device configurations may affect Linux interoperability, * power consumption and the functionality available. For example, * the default configuration is limited to using 100mA of bus power, * so that when certain device functionality requires more power, * and the device is bus powered, that functionality should be in some * non-default device configuration. Other device modes may also be * reflected as configuration options, such as whether two ISDN * channels are available independently; and choosing between open * standard device protocols (like CDC) or proprietary ones. * * Note that a non-authorized device (dev->authorized == 0) will only * be put in unconfigured mode. * * Note that USB has an additional level of device configurability, * associated with interfaces. That configurability is accessed using * usb_set_interface(). * * This call is synchronous. The calling context must be able to sleep, * must own the device lock, and must not hold the driver model's USB * bus mutex; usb interface driver probe() methods cannot use this routine. * * Returns zero on success, or else the status code returned by the * underlying call that failed. On successful completion, each interface * in the original device configuration has been destroyed, and each one * in the new configuration has been probed by all relevant usb device * drivers currently known to the kernel. 
*/ int usb_set_configuration(struct usb_device *dev, int configuration) { int i, ret; struct usb_host_config *cp = NULL; struct usb_interface **new_interfaces = NULL; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int n, nintf; if (dev->authorized == 0 || configuration == -1) configuration = 0; else { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { if (dev->config[i].desc.bConfigurationValue == configuration) { cp = &dev->config[i]; break; } } } if ((!cp && configuration != 0)) return -EINVAL; /* The USB spec says configuration 0 means unconfigured. * But if a device includes a configuration numbered 0, * we will accept it as a correctly configured state. * Use -1 if you really want to unconfigure the device. */ if (cp && configuration == 0) dev_warn(&dev->dev, "config 0 descriptor??\n"); /* Allocate memory for new interfaces before doing anything else, * so that if we run out then nothing will have changed. */ n = nintf = 0; if (cp) { nintf = cp->desc.bNumInterfaces; new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces), GFP_NOIO); if (!new_interfaces) return -ENOMEM; for (; n < nintf; ++n) { new_interfaces[n] = kzalloc( sizeof(struct usb_interface), GFP_NOIO); if (!new_interfaces[n]) { ret = -ENOMEM; free_interfaces: while (--n >= 0) kfree(new_interfaces[n]); kfree(new_interfaces); return ret; } } i = dev->bus_mA - usb_get_max_power(dev, cp); if (i < 0) dev_warn(&dev->dev, "new config #%d exceeds power " "limit by %dmA\n", configuration, -i); } /* Wake up the device so we can send it the Set-Config request */ ret = usb_autoresume_device(dev); if (ret) goto free_interfaces; /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ /* Get rid of pending async Set-Config requests for this device */ cancel_async_set_config(dev); /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new configuration is * installed, so that the xHCI driver can recalculate the U1/U2 * timeouts. */ if (dev->actconfig && usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); ret = -ENOMEM; goto free_interfaces; } ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { if (dev->actconfig) usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); usb_autosuspend_device(dev); goto free_interfaces; } /* * Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ for (i = 0; i < nintf; ++i) { struct usb_interface_cache *intfc; struct usb_interface *intf; struct usb_host_interface *alt; u8 ifnum; cp->interface[i] = intf = new_interfaces[i]; intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; intf->authorized = !!HCD_INTF_AUTHORIZED(hcd); kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. 
*/ if (!alt) alt = &intf->altsetting[0]; ifnum = alt->desc.bInterfaceNumber; intf->intf_assoc = find_iad(dev, cp, ifnum); intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); intf->dev.parent = &dev->dev; if (usb_of_has_combined_node(dev)) { device_set_of_node_from_dev(&intf->dev, &dev->dev); } else { intf->dev.of_node = usb_of_get_interface_node(dev, configuration, ifnum); } ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev)); intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; intf->dev.type = &usb_if_device_type; intf->dev.groups = usb_interface_groups; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); INIT_WORK(&intf->wireless_status_work, __usb_wireless_status_intf); intf->minor = -1; device_initialize(&intf->dev); pm_runtime_no_callbacks(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, configuration, ifnum); usb_get_dev(dev); } kfree(new_interfaces); ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (ret && cp) { /* * All the old state is gone, so what else can we do? * The device is probably useless now anyway. */ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); for (i = 0; i < nintf; ++i) { usb_disable_interface(dev, cp->interface[i], true); put_device(&cp->interface[i]->dev); cp->interface[i] = NULL; } cp = NULL; } dev->actconfig = cp; mutex_unlock(hcd->bandwidth_mutex); if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); /* Leave LPM disabled while the device is unconfigured. */ usb_autosuspend_device(dev); return ret; } usb_set_device_state(dev, USB_STATE_CONFIGURED); if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); /* Now that the interfaces are installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* Enable LTM if it was turned off by usb_disable_device. */ usb_enable_ltm(dev); /* Now that all the interfaces are set up, register them * to trigger binding of drivers to interfaces. probe() * routines may install different altsettings and may * claim() any interfaces not yet bound. Many class drivers * need that: CDC, audio, video, etc. */ for (i = 0; i < nintf; ++i) { struct usb_interface *intf = cp->interface[i]; if (intf->dev.of_node && !of_device_is_available(intf->dev.of_node)) { dev_info(&dev->dev, "skipping disabled interface %d\n", intf->cur_altsetting->desc.bInterfaceNumber); continue; } dev_dbg(&dev->dev, "adding %s (config #%d, interface %d)\n", dev_name(&intf->dev), configuration, intf->cur_altsetting->desc.bInterfaceNumber); device_enable_async_suspend(&intf->dev); ret = device_add(&intf->dev); if (ret != 0) { dev_err(&dev->dev, "device_add(%s) --> %d\n", dev_name(&intf->dev), ret); continue; } create_intf_ep_devs(intf); } usb_autosuspend_device(dev); return 0; } EXPORT_SYMBOL_GPL(usb_set_configuration); static LIST_HEAD(set_config_list); static DEFINE_SPINLOCK(set_config_lock); struct set_config_request { struct usb_device *udev; int config; struct work_struct work; struct list_head node; }; /* Worker routine for usb_driver_set_configuration() */ static void driver_set_config_work(struct work_struct *work) { struct set_config_request *req = container_of(work, struct set_config_request, work); struct usb_device *udev = req->udev; usb_lock_device(udev); spin_lock(&set_config_lock); list_del(&req->node); spin_unlock(&set_config_lock); if (req->config >= -1) /* Is req still valid? 
*/ usb_set_configuration(udev, req->config); usb_unlock_device(udev); usb_put_dev(udev); kfree(req); } /* Cancel pending Set-Config requests for a device whose configuration * was just changed */ static void cancel_async_set_config(struct usb_device *udev) { struct set_config_request *req; spin_lock(&set_config_lock); list_for_each_entry(req, &set_config_list, node) { if (req->udev == udev) req->config = -999; /* Mark as cancelled */ } spin_unlock(&set_config_lock); } /** * usb_driver_set_configuration - Provide a way for drivers to change device configurations * @udev: the device whose configuration is being updated * @config: the configuration being chosen. * Context: In process context, must be able to sleep * * Device interface drivers are not allowed to change device configurations. * This is because changing configurations will destroy the interface the * driver is bound to and create new ones; it would be like a floppy-disk * driver telling the computer to replace the floppy-disk drive with a * tape drive! * * Still, in certain specialized circumstances the need may arise. This * routine gets around the normal restrictions by using a work thread to * submit the change-config request. * * Return: 0 if the request was successfully queued, error code otherwise. * The caller has no way to know whether the queued request will eventually * succeed. */ int usb_driver_set_configuration(struct usb_device *udev, int config) { struct set_config_request *req; req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->udev = udev; req->config = config; INIT_WORK(&req->work, driver_set_config_work); spin_lock(&set_config_lock); list_add(&req->node, &set_config_list); spin_unlock(&set_config_lock); usb_get_dev(udev); schedule_work(&req->work); return 0; } EXPORT_SYMBOL_GPL(usb_driver_set_configuration); /** * cdc_parse_cdc_header - parse the extra headers present in CDC devices * @hdr: the place to put the results of the parsing * @intf: the interface for which parsing is requested * @buffer: pointer to the extra headers to be parsed * @buflen: length of the extra headers * * This evaluates the extra headers present in CDC devices which * bind the interfaces for data and control and provide details * about the capabilities of the device. 
* * Return: number of descriptors parsed or -EINVAL * if the header is contradictory beyond salvage */ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen) { /* duplicates are ignored */ struct usb_cdc_union_desc *union_header = NULL; /* duplicates are not tolerated */ struct usb_cdc_header_desc *header = NULL; struct usb_cdc_ether_desc *ether = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; struct usb_cdc_mdlm_desc *desc = NULL; unsigned int elength; int cnt = 0; memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); hdr->phonet_magic_present = false; while (buflen > 0) { elength = buffer[0]; if (!elength) { dev_err(&intf->dev, "skipping garbage byte\n"); elength = 1; goto next_desc; } if ((buflen < elength) || (elength < 3)) { dev_err(&intf->dev, "invalid descriptor buffer length\n"); break; } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ if (elength < sizeof(struct usb_cdc_union_desc)) goto next_desc; if (union_header) { dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); goto next_desc; } union_header = (struct usb_cdc_union_desc *)buffer; break; case USB_CDC_COUNTRY_TYPE: if (elength < sizeof(struct usb_cdc_country_functional_desc)) goto next_desc; hdr->usb_cdc_country_functional_desc = (struct usb_cdc_country_functional_desc *)buffer; break; case USB_CDC_HEADER_TYPE: if (elength != sizeof(struct usb_cdc_header_desc)) goto next_desc; if (header) return -EINVAL; header = (struct usb_cdc_header_desc *)buffer; break; case USB_CDC_ACM_TYPE: if (elength < sizeof(struct usb_cdc_acm_descriptor)) goto next_desc; hdr->usb_cdc_acm_descriptor = (struct usb_cdc_acm_descriptor *)buffer; break; case USB_CDC_ETHERNET_TYPE: if (elength != sizeof(struct usb_cdc_ether_desc)) goto next_desc; if (ether) return -EINVAL; ether = (struct usb_cdc_ether_desc *)buffer; break; case USB_CDC_CALL_MANAGEMENT_TYPE: if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) goto next_desc; hdr->usb_cdc_call_mgmt_descriptor = (struct usb_cdc_call_mgmt_descriptor *)buffer; break; case USB_CDC_DMM_TYPE: if (elength < sizeof(struct usb_cdc_dmm_desc)) goto next_desc; hdr->usb_cdc_dmm_desc = (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; detail = (struct usb_cdc_mdlm_detail_desc *)buffer; break; case USB_CDC_NCM_TYPE: if (elength < sizeof(struct usb_cdc_ncm_desc)) goto next_desc; hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; break; case USB_CDC_MBIM_TYPE: if (elength < sizeof(struct usb_cdc_mbim_desc)) goto next_desc; hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; break; case USB_CDC_MBIM_EXTENDED_TYPE: if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) break; hdr->usb_cdc_mbim_extended_desc = (struct usb_cdc_mbim_extended_desc *)buffer; break; case CDC_PHONET_MAGIC_NUMBER: hdr->phonet_magic_present = true; break; default: /* * there are LOTS more CDC descriptors that * could legitimately be found here. 
*/ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n", buffer[2], elength); goto next_desc; } cnt++; next_desc: buflen -= elength; buffer += elength; } hdr->usb_cdc_union_desc = union_header; hdr->usb_cdc_header_desc = header; hdr->usb_cdc_mdlm_detail_desc = detail; hdr->usb_cdc_mdlm_desc = desc; hdr->usb_cdc_ether_desc = ether; return cnt; } EXPORT_SYMBOL(cdc_parse_cdc_header);
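/*
 * Illustrative sketch, not part of the original source: how a bound
 * interface driver might use usb_set_interface() and usb_clear_halt()
 * as documented above. The altsetting number and endpoint number are
 * hypothetical, and error handling is kept minimal. Assumes
 * <linux/usb.h> is included.
 */
static int example_select_altsetting(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	/* Switch the claimed interface to (hypothetical) altsetting 1;
	 * this must not race with URBs queued on the old altsetting.
	 */
	ret = usb_set_interface(udev,
				intf->cur_altsetting->desc.bInterfaceNumber, 1);
	if (ret)
		return ret;

	/* If a later bulk transfer fails with -EPIPE, the halted endpoint
	 * (here: hypothetical bulk-IN endpoint 1) can be recovered with
	 * usb_clear_halt() on the matching pipe.
	 */
	return usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));
}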
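/*
 * Illustrative sketch, not part of the original source: requesting a
 * configuration change from an interface driver. As the kernel-doc for
 * usb_driver_set_configuration() above explains, the change can only be
 * queued asynchronously; configuration value 2 is purely hypothetical.
 */
static int example_request_config(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* Queues the Set-Config request on a work thread; the current
	 * interfaces (including this one) are unbound when it runs.
	 */
	return usb_driver_set_configuration(udev, 2);
}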
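/*
 * Illustrative sketch, not part of the original source: feeding the
 * class-specific descriptors of a (hypothetical) CDC control interface
 * to cdc_parse_cdc_header(), as described in its kernel-doc above.
 * Assumes <linux/usb/cdc.h> is included.
 */
static int example_parse_cdc(struct usb_interface *intf)
{
	struct usb_cdc_parsed_header hdr;
	int cnt;

	/* The extra descriptors of the current altsetting carry the CDC
	 * functional descriptors on typical CDC control interfaces.
	 */
	cnt = cdc_parse_cdc_header(&hdr, intf,
				   intf->cur_altsetting->extra,
				   intf->cur_altsetting->extralen);
	if (cnt < 0)
		return cnt;

	/* On success, fields such as hdr.usb_cdc_union_desc or
	 * hdr.usb_cdc_ether_desc point into the buffer, or are NULL.
	 */
	return 0;
}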
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Lauro Ramos Venancio <lauro.venancio@openbossa.org> * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> */ #ifndef __LOCAL_NFC_H #define __LOCAL_NFC_H #include <net/nfc/nfc.h> #include <net/sock.h> #define NFC_TARGET_MODE_IDLE 0 #define NFC_TARGET_MODE_SLEEP 1 struct nfc_protocol { int id; struct proto *proto; struct module *owner; int (*create)(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern); }; struct nfc_rawsock { struct sock sk; struct nfc_dev *dev; u32 target_idx; struct work_struct tx_work; bool tx_work_scheduled; }; struct nfc_sock_list { struct hlist_head head; rwlock_t lock; }; #define nfc_rawsock(sk) ((struct nfc_rawsock *) sk) #define to_rawsock_sk(_tx_work) \ ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work)) struct nfc_llcp_sdp_tlv; void nfc_llcp_mac_is_down(struct nfc_dev *dev); void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); int nfc_llcp_register_device(struct nfc_dev *dev); void nfc_llcp_unregister_device(struct nfc_dev *dev); int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len); u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); int nfc_llcp_local_put(struct nfc_llcp_local *local); int __init nfc_llcp_init(void); void nfc_llcp_exit(void); void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp); void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head); int __init rawsock_init(void); void rawsock_exit(void); int __init af_nfc_init(void); void af_nfc_exit(void); int nfc_proto_register(const struct nfc_protocol *nfc_proto); void nfc_proto_unregister(const struct nfc_protocol *nfc_proto); extern int nfc_devlist_generation; extern struct mutex nfc_devlist_mutex; int __init nfc_genl_init(void); void nfc_genl_exit(void); void nfc_genl_data_init(struct nfc_genl_data *genl_data); void nfc_genl_data_exit(struct nfc_genl_data *genl_data); int nfc_genl_targets_found(struct nfc_dev *dev); int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx); int nfc_genl_device_added(struct nfc_dev *dev); int nfc_genl_device_removed(struct nfc_dev *dev); int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); int nfc_genl_dep_link_down_event(struct nfc_dev *dev); int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol); int nfc_genl_tm_deactivated(struct nfc_dev *dev); int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list); int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type); int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx); int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx, struct nfc_evt_transaction *evt_transaction); int nfc_genl_se_connectivity(struct nfc_dev *dev, u8 se_idx); struct nfc_dev *nfc_get_device(unsigned
int idx); static inline void nfc_put_device(struct nfc_dev *dev) { put_device(&dev->dev); } static inline void nfc_device_iter_init(struct class_dev_iter *iter) { class_dev_iter_init(iter, &nfc_class, NULL, NULL); } static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter) { struct device *d = class_dev_iter_next(iter); if (!d) return NULL; return to_nfc_dev(d); } static inline void nfc_device_iter_exit(struct class_dev_iter *iter) { class_dev_iter_exit(iter); } int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name); int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, u32 result); int nfc_dev_up(struct nfc_dev *dev); int nfc_dev_down(struct nfc_dev *dev); int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols); int nfc_stop_poll(struct nfc_dev *dev); int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, u8 comm_mode); int nfc_dep_link_down(struct nfc_dev *dev); int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol); int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode); int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context); int nfc_enable_se(struct nfc_dev *dev, u32 se_idx); int nfc_disable_se(struct nfc_dev *dev, u32 se_idx); #endif /* __LOCAL_NFC_H */
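/*
 * Illustrative sketch, not part of the original source: walking the
 * registered NFC devices with the iterator helpers declared above
 * (assumes this local nfc.h is included). The printout is arbitrary.
 */
static void example_list_nfc_devices(void)
{
	struct class_dev_iter iter;
	struct nfc_dev *dev;

	nfc_device_iter_init(&iter);
	while ((dev = nfc_device_iter_next(&iter)) != NULL)
		pr_info("nfc: found %s\n", dev_name(&dev->dev));
	nfc_device_iter_exit(&iter);
}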
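/*
 * Illustrative sketch, not part of the original source: the usual
 * lookup/release pairing with nfc_get_device() and nfc_put_device()
 * declared above. The index value passed in is hypothetical.
 */
static int example_check_nfc_device(unsigned int idx)
{
	struct nfc_dev *dev;

	dev = nfc_get_device(idx);
	if (!dev)
		return -ENODEV;

	/* ... operate on dev here ... */

	nfc_put_device(dev);	/* drop the reference taken by the lookup */
	return 0;
}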
// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/vmalloc.h> #include <linux/crc32.h> #include <linux/firmware.h> #include <linux/kstrtox.h> #include "core.h" #include "debug.h" #include "hif.h" #include "wmi-ops.h" /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 #define ATH10K_DEBUG_CAL_DATA_LEN 12064 void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{ struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_info(ar->dev, "%pV", &vaf); trace_ath10k_log_info(ar, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_info); void ath10k_debug_print_hwfw_info(struct ath10k *ar) { const struct firmware *firmware; char fw_features[128] = {}; u32 crc = 0; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x", ar->hw_params.name, ar->target_version, ar->bus_param.chip_id, ar->id.subsystem_vendor, ar->id.subsystem_device); ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", IS_ENABLED(CONFIG_ATH10K_DEBUG), IS_ENABLED(CONFIG_ATH10K_DEBUGFS), IS_ENABLED(CONFIG_ATH10K_TRACING), IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED), IS_ENABLED(CONFIG_NL80211_TESTMODE)); firmware = ar->normal_mode_fw.fw_file.firmware; if (firmware) crc = crc32_le(0, firmware->data, firmware->size); ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", ar->hw->wiphy->fw_version, ar->fw_api, fw_features, crc); } void ath10k_debug_print_board_info(struct ath10k *ar) { char boardinfo[100]; const struct firmware *board; u32 crc; if (ar->id.bmi_ids_valid) scnprintf(boardinfo, sizeof(boardinfo), "%d:%d", ar->id.bmi_chip_id, ar->id.bmi_board_id); else scnprintf(boardinfo, sizeof(boardinfo), "N/A"); board = ar->normal_mode_fw.board; if (!IS_ERR_OR_NULL(board)) crc = crc32_le(0, board->data, board->size); else crc = 0; ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", ar->bd_api, boardinfo, crc); } void ath10k_debug_print_boot_info(struct ath10k *ar) { ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n", ar->htt.target_version_major, ar->htt.target_version_minor, ar->normal_mode_fw.fw_file.wmi_op_version, ar->normal_mode_fw.fw_file.htt_op_version, ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)); } void ath10k_print_driver_info(struct ath10k *ar) { ath10k_debug_print_hwfw_info(ar); ath10k_debug_print_board_info(ar); ath10k_debug_print_boot_info(ar); } EXPORT_SYMBOL(ath10k_print_driver_info); void ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_err(ar->dev, "%pV", &vaf); trace_ath10k_log_err(ar, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_err); void ath10k_warn(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_warn_ratelimited(ar->dev, "%pV", &vaf); trace_ath10k_log_warn(ar, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_warn); #ifdef CONFIG_ATH10K_DEBUGFS static ssize_t ath10k_read_wmi_services(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char *buf; size_t len = 0, buf_len = 8192; const char *name; ssize_t ret_cnt; bool enabled; int i; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); for (i = 0; i < WMI_SERVICE_MAX; i++) { enabled = test_bit(i, ar->wmi.svc_map); name = wmi_service_name(i); if (!name) { if (enabled) len += scnprintf(buf + len, buf_len - len, "%-40s %s (bit %d)\n", "unknown", "enabled", i); continue; } len += scnprintf(buf + len, buf_len - len, "%-40s %s\n", name, enabled ? 
"enabled" : "-"); } spin_unlock_bh(&ar->data_lock); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); mutex_unlock(&ar->conf_mutex); kfree(buf); return ret_cnt; } static const struct file_operations fops_wmi_services = { .read = ath10k_read_wmi_services, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath10k_fw_stats_pdevs_free(struct list_head *head) { struct ath10k_fw_stats_pdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_fw_stats_vdevs_free(struct list_head *head) { struct ath10k_fw_stats_vdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_fw_stats_peers_free(struct list_head *head) { struct ath10k_fw_stats_peer *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_fw_extd_stats_peers_free(struct list_head *head) { struct ath10k_fw_extd_stats_peer *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_debug_fw_stats_reset(struct ath10k *ar) { spin_lock_bh(&ar->data_lock); ar->debug.fw_stats_done = false; ar->debug.fw_stats.extended = false; ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); spin_unlock_bh(&ar->data_lock); } void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_fw_stats stats = {}; bool is_start, is_started, is_end; size_t num_peers; size_t num_vdevs; int ret; INIT_LIST_HEAD(&stats.pdevs); INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.peers); INIT_LIST_HEAD(&stats.peers_extd); spin_lock_bh(&ar->data_lock); ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); if (ret) { ath10k_warn(ar, "failed to pull fw stats: %d\n", ret); goto free; } /* Stat data may exceed htc-wmi buffer limit. In such case firmware * splits the stats data and delivers it in a ping-pong fashion of * request cmd-update event. * * However there is no explicit end-of-data. Instead start-of-data is * used as an implicit one. This works as follows: * a) discard stat update events until one with pdev stats is * delivered - this skips session started at end of (b) * b) consume stat update events until another one with pdev stats is * delivered which is treated as end-of-data and is itself discarded */ if (ath10k_peer_stats_enabled(ar)) ath10k_sta_update_rx_duration(ar, &stats); if (ar->debug.fw_stats_done) { if (!ath10k_peer_stats_enabled(ar)) ath10k_warn(ar, "received unsolicited stats update event\n"); goto free; } num_peers = list_count_nodes(&ar->debug.fw_stats.peers); num_vdevs = list_count_nodes(&ar->debug.fw_stats.vdevs); is_start = (list_empty(&ar->debug.fw_stats.pdevs) && !list_empty(&stats.pdevs)); is_end = (!list_empty(&ar->debug.fw_stats.pdevs) && !list_empty(&stats.pdevs)); if (is_start) list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); if (is_end) ar->debug.fw_stats_done = true; if (stats.extended) ar->debug.fw_stats.extended = true; is_started = !list_empty(&ar->debug.fw_stats.pdevs); if (is_started && !is_end) { if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) { /* Although this is unlikely impose a sane limit to * prevent firmware from DoS-ing the host. 
*/ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); ath10k_warn(ar, "dropping fw peer stats\n"); goto free; } if (num_vdevs >= BITS_PER_LONG) { ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_warn(ar, "dropping fw vdev stats\n"); goto free; } if (!list_empty(&stats.peers)) list_splice_tail_init(&stats.peers_extd, &ar->debug.fw_stats.peers_extd); list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); } complete(&ar->debug.fw_stats_complete); free: /* In some cases lists have been spliced and cleared. Free up * resources if that is not the case. */ ath10k_fw_stats_pdevs_free(&stats.pdevs); ath10k_fw_stats_vdevs_free(&stats.vdevs); ath10k_fw_stats_peers_free(&stats.peers); ath10k_fw_extd_stats_peers_free(&stats.peers_extd); spin_unlock_bh(&ar->data_lock); } int ath10k_debug_fw_stats_request(struct ath10k *ar) { unsigned long timeout, time_left; int ret; lockdep_assert_held(&ar->conf_mutex); timeout = jiffies + msecs_to_jiffies(1 * HZ); ath10k_debug_fw_stats_reset(ar); for (;;) { if (time_after(jiffies, timeout)) return -ETIMEDOUT; reinit_completion(&ar->debug.fw_stats_complete); ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask); if (ret) { ath10k_warn(ar, "could not request stats (%d)\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->debug.fw_stats_complete, 1 * HZ); if (!time_left) return -ETIMEDOUT; spin_lock_bh(&ar->data_lock); if (ar->debug.fw_stats_done) { spin_unlock_bh(&ar->data_lock); break; } spin_unlock_bh(&ar->data_lock); } return 0; } static int ath10k_fw_stats_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } ret = ath10k_debug_fw_stats_request(ar); if (ret) { ath10k_warn(ar, "failed to request fw stats: %d\n", ret); goto err_free; } ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf); if (ret) { ath10k_warn(ar, "failed to fill fw stats: %d\n", ret); goto err_free; } file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_fw_stats_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_fw_stats = { .open = ath10k_fw_stats_open, .release = ath10k_fw_stats_release, .read = ath10k_fw_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; size_t len = 0, buf_len = 500; char *buf; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock_bh(&ar->data_lock); len += scnprintf(buf + len, buf_len - len, "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); len += scnprintf(buf + len, buf_len - len, "fw_warm_reset_counter\t\t%d\n", ar->stats.fw_warm_reset_counter); len += scnprintf(buf + len, buf_len - len, "fw_cold_reset_counter\t\t%d\n", 
ar->stats.fw_cold_reset_counter); spin_unlock_bh(&ar->data_lock); ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret; } static const struct file_operations fops_fw_reset_stats = { .open = simple_open, .read = ath10k_debug_fw_reset_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; /* This is a clean assert crash in firmware. */ static int ath10k_debug_fw_assert(struct ath10k *ar) { struct wmi_vdev_install_key_cmd *cmd; struct sk_buff *skb; skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_install_key_cmd *)skb->data; memset(cmd, 0, sizeof(*cmd)); /* big enough number so that firmware asserts */ cmd->vdev_id = __cpu_to_le32(0x7ffe); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_install_key_cmdid); } static ssize_t ath10k_read_simulate_fw_crash(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char buf[] = "To simulate firmware crash write one of the keywords to this file:\n" "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n" "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n" "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n" "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } /* Simulate firmware crash: * 'soft': Call wmi command causing firmware hang. This firmware hang is * recoverable by warm firmware reset. * 'hard': Force firmware crash by setting any vdev parameter for not allowed * vdev id. This is hard firmware crash because it is recoverable only by cold * firmware reset. */ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32] = {0}; ssize_t rc; int ret; /* filter partial writes and invalid commands */ if (*ppos != 0 || count >= sizeof(buf) || count == 0) return -EINVAL; rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (rc < 0) return rc; /* drop the possible '\n' from the end */ if (buf[*ppos - 1] == '\n') buf[*ppos - 1] = '\0'; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } if (!strcmp(buf, "soft")) { ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); } else if (!strcmp(buf, "hard")) { ath10k_info(ar, "simulating hard firmware crash\n"); /* 0x7fff is vdev id, and it is always out of range for all * firmware variants in order to force a firmware crash. 
*/ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff, ar->wmi.vdev_param->rts_threshold, 0); } else if (!strcmp(buf, "assert")) { ath10k_info(ar, "simulating firmware assert crash\n"); ret = ath10k_debug_fw_assert(ar); } else if (!strcmp(buf, "hw-restart")) { ath10k_info(ar, "user requested hw restart\n"); ath10k_core_start_recovery(ar); ret = 0; } else { ret = -EINVAL; goto exit; } if (ret) { ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret); goto exit; } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_simulate_fw_crash = { .read = ath10k_read_simulate_fw_crash, .write = ath10k_write_simulate_fw_crash, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len; char buf[50]; len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->bus_param.chip_id); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_chip_id = { .read = ath10k_read_chip_id, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_reg_addr_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 buf[32]; size_t len = 0; u32 reg_addr; mutex_lock(&ar->conf_mutex); reg_addr = ar->debug.reg_addr; mutex_unlock(&ar->conf_mutex); len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_reg_addr_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 reg_addr; int ret; ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr); if (ret) return ret; if (!IS_ALIGNED(reg_addr, 4)) return -EFAULT; mutex_lock(&ar->conf_mutex); ar->debug.reg_addr = reg_addr; mutex_unlock(&ar->conf_mutex); return count; } static const struct file_operations fops_reg_addr = { .read = ath10k_reg_addr_read, .write = ath10k_reg_addr_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_reg_value_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 buf[48]; size_t len; u32 reg_addr, reg_val; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } reg_addr = ar->debug.reg_addr; reg_val = ath10k_hif_read32(ar, reg_addr); len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val); ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_reg_value_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 reg_addr, reg_val; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } reg_addr = ar->debug.reg_addr; ret = kstrtou32_from_user(user_buf, count, 0, &reg_val); if (ret) goto exit; ath10k_hif_write32(ar, reg_addr, reg_val); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_reg_value = { .read = ath10k_reg_value_read, .write = ath10k_reg_value_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static 
ssize_t ath10k_mem_value_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 *buf; int ret; if (*ppos < 0) return -EINVAL; if (!count) return 0; mutex_lock(&ar->conf_mutex); buf = vmalloc(count); if (!buf) { ret = -ENOMEM; goto exit; } if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } ret = ath10k_hif_diag_read(ar, *ppos, buf, count); if (ret) { ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n", (u32)(*ppos), ret); goto exit; } ret = copy_to_user(user_buf, buf, count); if (ret) { ret = -EFAULT; goto exit; } count -= ret; *ppos += count; ret = count; exit: vfree(buf); mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_mem_value_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 *buf; int ret; if (*ppos < 0) return -EINVAL; if (!count) return 0; mutex_lock(&ar->conf_mutex); buf = vmalloc(count); if (!buf) { ret = -ENOMEM; goto exit; } if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } ret = copy_from_user(buf, user_buf, count); if (ret) { ret = -EFAULT; goto exit; } ret = ath10k_hif_diag_write(ar, *ppos, buf, count); if (ret) { ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n", (u32)(*ppos), ret); goto exit; } *ppos += count; ret = count; exit: vfree(buf); mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_mem_value = { .read = ath10k_mem_value_read, .write = ath10k_mem_value_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath10k_debug_htt_stats_req(struct ath10k *ar) { u64 cookie; int ret; lockdep_assert_held(&ar->conf_mutex); if (ar->debug.htt_stats_mask == 0) /* htt stats are disabled */ return 0; if (ar->state != ATH10K_STATE_ON) return 0; cookie = get_jiffies_64(); ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, ar->debug.reset_htt_stats, cookie); if (ret) { ath10k_warn(ar, "failed to send htt stats request: %d\n", ret); return ret; } queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork, msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL)); return 0; } static void ath10k_debug_htt_stats_dwork(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, debug.htt_stats_dwork.work); mutex_lock(&ar->conf_mutex); ath10k_debug_htt_stats_req(ar); mutex_unlock(&ar->conf_mutex); } static ssize_t ath10k_read_htt_stats_mask(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32]; size_t len; len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_htt_stats_mask(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long mask; int ret; ret = kstrtoul_from_user(user_buf, count, 0, &mask); if (ret) return ret; /* max 17 bit masks (for now) */ if (mask > HTT_STATS_BIT_MASK) return -E2BIG; mutex_lock(&ar->conf_mutex); ar->debug.htt_stats_mask = mask; ret = ath10k_debug_htt_stats_req(ar); if (ret) goto out; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_htt_stats_mask = { .read = ath10k_read_htt_stats_mask, .write = ath10k_write_htt_stats_mask, .open = 
simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[64]; u8 amsdu, ampdu; size_t len; mutex_lock(&ar->conf_mutex); amsdu = ar->htt.max_num_amsdu; ampdu = ar->htt.max_num_ampdu; mutex_unlock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int res; char buf[64] = {0}; unsigned int amsdu, ampdu; res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (res <= 0) return res; res = sscanf(buf, "%u %u", &amsdu, &ampdu); if (res != 2) return -EINVAL; mutex_lock(&ar->conf_mutex); res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu); if (res) goto out; res = count; ar->htt.max_num_amsdu = amsdu; ar->htt.max_num_ampdu = ampdu; out: mutex_unlock(&ar->conf_mutex); return res; } static const struct file_operations fops_htt_max_amsdu_ampdu = { .read = ath10k_read_htt_max_amsdu_ampdu, .write = ath10k_write_htt_max_amsdu_ampdu, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_fw_dbglog(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len; char buf[96]; len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n", ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_fw_dbglog(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; char buf[96] = {0}; unsigned int log_level; u64 mask; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret <= 0) return ret; ret = sscanf(buf, "%llx %u", &mask, &log_level); if (!ret) return -EINVAL; if (ret == 1) /* default if user did not specify */ log_level = ATH10K_DBGLOG_LEVEL_WARN; mutex_lock(&ar->conf_mutex); ar->debug.fw_dbglog_mask = mask; ar->debug.fw_dbglog_level = log_level; if (ar->state == ATH10K_STATE_ON) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); if (ret) { ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", ret); goto exit; } } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } /* TODO: Would be nice to always support ethtool stats, would need to * move the stats storage out of ath10k_debug, or always have ath10k_debug * struct available.. 
*/ /* This generally corresponds to the debugfs fw_stats file */ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = { "tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic", "d_noise_floor", "d_cycle_count", "d_phy_error", "d_rts_bad", "d_rts_good", "d_tx_power", /* in .5 dbM I think */ "d_rx_crc_err", /* fcs_bad */ "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */ "d_no_beacon", "d_tx_mpdus_queued", "d_tx_msdu_queued", "d_tx_msdu_dropped", "d_local_enqued", "d_local_freed", "d_tx_ppdu_hw_queued", "d_tx_ppdu_reaped", "d_tx_fifo_underrun", "d_tx_ppdu_abort", "d_tx_mpdu_requeued", "d_tx_excessive_retries", "d_tx_hw_rate", "d_tx_dropped_sw_retries", "d_tx_illegal_rate", "d_tx_continuous_xretries", "d_tx_timeout", "d_tx_mpdu_txop_limit", "d_pdev_resets", "d_rx_mid_ppdu_route_change", "d_rx_status", "d_rx_extra_frags_ring0", "d_rx_extra_frags_ring1", "d_rx_extra_frags_ring2", "d_rx_extra_frags_ring3", "d_rx_msdu_htt", "d_rx_mpdu_htt", "d_rx_msdu_stack", "d_rx_mpdu_stack", "d_rx_phy_err", "d_rx_phy_err_drops", "d_rx_mpdu_errors", /* FCS, MIC, ENC */ "d_fw_crash_count", "d_fw_warm_reset_count", "d_fw_cold_reset_count", }; #define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats) void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) memcpy(data, ath10k_gstrings_stats, sizeof(ath10k_gstrings_stats)); } int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset == ETH_SS_STATS) return ATH10K_SSTATS_LEN; return 0; } void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct ath10k *ar = hw->priv; static const struct ath10k_fw_stats_pdev zero_stats = {}; const struct ath10k_fw_stats_pdev *pdev_stats; int i = 0, ret; mutex_lock(&ar->conf_mutex); if (ar->state == ATH10K_STATE_ON) { ret = ath10k_debug_fw_stats_request(ar); if (ret) { /* just print a warning and try to use older results */ ath10k_warn(ar, "failed to get fw stats for ethtool: %d\n", ret); } } pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs, struct ath10k_fw_stats_pdev, list); if (!pdev_stats) { /* no results available so just return zeroes */ pdev_stats = &zero_stats; } spin_lock_bh(&ar->data_lock); data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */ data[i++] = 0; /* tx bytes */ data[i++] = pdev_stats->htt_mpdus; data[i++] = 0; /* rx bytes */ data[i++] = pdev_stats->ch_noise_floor; data[i++] = pdev_stats->cycle_count; data[i++] = pdev_stats->phy_err_count; data[i++] = pdev_stats->rts_bad; data[i++] = pdev_stats->rts_good; data[i++] = pdev_stats->chan_tx_power; data[i++] = pdev_stats->fcs_bad; data[i++] = ar->stats.rx_crc_err_drop; data[i++] = pdev_stats->no_beacons; data[i++] = pdev_stats->mpdu_enqued; data[i++] = pdev_stats->msdu_enqued; data[i++] = pdev_stats->wmm_drop; data[i++] = pdev_stats->local_enqued; data[i++] = pdev_stats->local_freed; data[i++] = pdev_stats->hw_queued; data[i++] = pdev_stats->hw_reaped; data[i++] = pdev_stats->underrun; data[i++] = pdev_stats->tx_abort; data[i++] = pdev_stats->mpdus_requeued; data[i++] = pdev_stats->tx_ko; data[i++] = pdev_stats->data_rc; data[i++] = pdev_stats->sw_retry_failure; data[i++] = pdev_stats->illgl_rate_phy_err; data[i++] = pdev_stats->pdev_cont_xretry; data[i++] = pdev_stats->pdev_tx_timeout; data[i++] = pdev_stats->txop_ovf; data[i++] = pdev_stats->pdev_resets; data[i++] = pdev_stats->mid_ppdu_route_change; data[i++] = 
pdev_stats->status_rcvd; data[i++] = pdev_stats->r0_frags; data[i++] = pdev_stats->r1_frags; data[i++] = pdev_stats->r2_frags; data[i++] = pdev_stats->r3_frags; data[i++] = pdev_stats->htt_msdus; data[i++] = pdev_stats->htt_mpdus; data[i++] = pdev_stats->loc_msdus; data[i++] = pdev_stats->loc_mpdus; data[i++] = pdev_stats->phy_errs; data[i++] = pdev_stats->phy_err_drop; data[i++] = pdev_stats->mpdu_errs; data[i++] = ar->stats.fw_crash_counter; data[i++] = ar->stats.fw_warm_reset_counter; data[i++] = ar->stats.fw_cold_reset_counter; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); WARN_ON(i != ATH10K_SSTATS_LEN); } static const struct file_operations fops_fw_dbglog = { .read = ath10k_read_fw_dbglog, .write = ath10k_write_fw_dbglog, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath10k_debug_cal_data_fetch(struct ath10k *ar) { u32 hi_addr; __le32 addr; int ret; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) return -EINVAL; if (ar->hw_params.cal_data_len == 0) return -EOPNOTSUPP; hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); if (ret) { ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); return ret; } ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data, ar->hw_params.cal_data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data: %d\n", ret); return ret; } return 0; } static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; mutex_lock(&ar->conf_mutex); if (ar->state == ATH10K_STATE_ON || ar->state == ATH10K_STATE_UTF) { ath10k_debug_cal_data_fetch(ar); } file->private_data = ar; mutex_unlock(&ar->conf_mutex); return 0; } static ssize_t ath10k_debug_cal_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; mutex_lock(&ar->conf_mutex); count = simple_read_from_buffer(user_buf, count, ppos, ar->debug.cal_data, ar->hw_params.cal_data_len); mutex_unlock(&ar->conf_mutex); return count; } static ssize_t ath10k_write_ani_enable(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; u8 enable; if (kstrtou8_from_user(user_buf, count, 0, &enable)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->ani_enabled == enable) { ret = count; goto exit; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable, enable); if (ret) { ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret); goto exit; } ar->ani_enabled = enable; ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len; char buf[32]; len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_ani_enable = { .read = ath10k_read_ani_enable, .write = ath10k_write_ani_enable, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static const struct file_operations fops_cal_data = { .open = ath10k_debug_cal_data_open, .read = ath10k_debug_cal_data_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_nf_cal_period(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct 
ath10k *ar = file->private_data; size_t len; char buf[32]; len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_nf_cal_period(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long period; int ret; ret = kstrtoul_from_user(user_buf, count, 0, &period); if (ret) return ret; if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX) return -EINVAL; /* there's no way to switch back to the firmware default */ if (period == 0) return -EINVAL; mutex_lock(&ar->conf_mutex); ar->debug.nf_cal_period = period; if (ar->state != ATH10K_STATE_ON) { /* firmware is not running, nothing else to do */ ret = count; goto exit; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); if (ret) { ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n", ret); goto exit; } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_nf_cal_period = { .read = ath10k_read_nf_cal_period, .write = ath10k_write_nf_cal_period, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024) static int ath10k_debug_tpc_stats_request(struct ath10k *ar) { int ret; unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->debug.tpc_complete); ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM); if (ret) { ath10k_warn(ar, "failed to request tpc config: %d\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, 1 * HZ); if (time_left == 0) return -ETIMEDOUT; return 0; } void ath10k_debug_tpc_stats_process(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats) { spin_lock_bh(&ar->data_lock); kfree(ar->debug.tpc_stats); ar->debug.tpc_stats = tpc_stats; complete(&ar->debug.tpc_complete); spin_unlock_bh(&ar->data_lock); } void ath10k_debug_tpc_stats_final_process(struct ath10k *ar, struct ath10k_tpc_stats_final *tpc_stats) { spin_lock_bh(&ar->data_lock); kfree(ar->debug.tpc_stats_final); ar->debug.tpc_stats_final = tpc_stats; complete(&ar->debug.tpc_complete); spin_unlock_bh(&ar->data_lock); } static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, unsigned int j, char *buf, size_t *len) { int i; size_t buf_len; static const char table_str[][5] = { "CDD", "STBC", "TXBF" }; static const char pream_str[][6] = { "CCK", "OFDM", "HT20", "HT40", "VHT20", "VHT40", "VHT80", "HTCUP" }; buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; *len += scnprintf(buf + *len, buf_len - *len, "********************************\n"); *len += scnprintf(buf + *len, buf_len - *len, "******************* %s POWER TABLE ****************\n", table_str[j]); *len += scnprintf(buf + *len, buf_len - *len, "********************************\n"); *len += scnprintf(buf + *len, buf_len - *len, "No. 
Preamble Rate_code "); for (i = 0; i < tpc_stats->num_tx_chain; i++) *len += scnprintf(buf + *len, buf_len - *len, "tpc_value%d ", i); *len += scnprintf(buf + *len, buf_len - *len, "\n"); for (i = 0; i < tpc_stats->rate_max; i++) { *len += scnprintf(buf + *len, buf_len - *len, "%8d %s 0x%2x %s\n", i, pream_str[tpc_stats->tpc_table[j].pream_idx[i]], tpc_stats->tpc_table[j].rate_code[i], tpc_stats->tpc_table[j].tpc_value[i]); } *len += scnprintf(buf + *len, buf_len - *len, "***********************************\n"); } static void ath10k_tpc_stats_fill(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats, char *buf) { int j; size_t len, buf_len; len = 0; buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; spin_lock_bh(&ar->data_lock); if (!tpc_stats) { ath10k_warn(ar, "failed to get tpc stats\n"); goto unlock; } len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "*************************************\n"); len += scnprintf(buf + len, buf_len - len, "TPC config for channel %4d mode %d\n", tpc_stats->chan_freq, tpc_stats->phy_mode); len += scnprintf(buf + len, buf_len - len, "*************************************\n"); len += scnprintf(buf + len, buf_len - len, "CTL = 0x%2x Reg. Domain = %2d\n", tpc_stats->ctl, tpc_stats->reg_domain); len += scnprintf(buf + len, buf_len - len, "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n", tpc_stats->twice_antenna_gain, tpc_stats->twice_antenna_reduction); len += scnprintf(buf + len, buf_len - len, "Power Limit = %2d Reg. Max Power = %2d\n", tpc_stats->power_limit, tpc_stats->twice_max_rd_power / 2); len += scnprintf(buf + len, buf_len - len, "Num tx chains = %2d Num supported rates = %2d\n", tpc_stats->num_tx_chain, tpc_stats->rate_max); for (j = 0; j < WMI_TPC_FLAG; j++) { switch (j) { case WMI_TPC_TABLE_TYPE_CDD: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { len += scnprintf(buf + len, buf_len - len, "CDD not supported\n"); break; } ath10k_tpc_stats_print(tpc_stats, j, buf, &len); break; case WMI_TPC_TABLE_TYPE_STBC: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { len += scnprintf(buf + len, buf_len - len, "STBC not supported\n"); break; } ath10k_tpc_stats_print(tpc_stats, j, buf, &len); break; case WMI_TPC_TABLE_TYPE_TXBF: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { len += scnprintf(buf + len, buf_len - len, "TXBF not supported\n***************************\n"); break; } ath10k_tpc_stats_print(tpc_stats, j, buf, &len); break; default: len += scnprintf(buf + len, buf_len - len, "Invalid Type\n"); break; } } unlock: spin_unlock_bh(&ar->data_lock); if (len >= buf_len) buf[len - 1] = 0; else buf[len] = 0; } static int ath10k_tpc_stats_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } ret = ath10k_debug_tpc_stats_request(ar); if (ret) { ath10k_warn(ar, "failed to request tpc config stats: %d\n", ret); goto err_free; } ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_tpc_stats_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = 
file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_tpc_stats = { .open = ath10k_tpc_stats_open, .release = ath10k_tpc_stats_release, .read = ath10k_tpc_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; int ath10k_debug_start(struct ath10k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); ret = ath10k_debug_htt_stats_req(ar); if (ret) /* continue normally anyway, this isn't serious */ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n", ret); if (ar->debug.fw_dbglog_mask) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, ATH10K_DBGLOG_LEVEL_WARN); if (ret) /* not serious */ ath10k_warn(ar, "failed to enable dbglog during start: %d", ret); } if (ar->pktlog_filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, ar->pktlog_filter); if (ret) /* not serious */ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", ar->pktlog_filter, ret); } else { ret = ath10k_wmi_pdev_pktlog_disable(ar); if (ret) /* not serious */ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); } if (ar->debug.nf_cal_period && !test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); if (ret) /* not serious */ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n", ret); } return ret; } void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) ath10k_debug_cal_data_fetch(ar); /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid * warning from del_timer(). */ if (ar->debug.htt_stats_mask != 0) cancel_delayed_work(&ar->debug.htt_stats_dwork); ath10k_wmi_pdev_pktlog_disable(ar); } static ssize_t ath10k_write_simulate_radar(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; struct ath10k_vif *arvif; /* Just check for the first vif alone, as all the vifs will be * sharing the same channel and if the channel is disabled, all the * vifs will share the same 'is_started' state. 
*/ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); if (!arvif->is_started) return -EINVAL; ieee80211_radar_detected(ar->hw, NULL); return count; } static const struct file_operations fops_simulate_radar = { .write = ath10k_write_simulate_radar, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define ATH10K_DFS_STAT(s, p) (\ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ ar->debug.dfs_stats.p)) #define ATH10K_DFS_POOL_STAT(s, p) (\ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ ar->debug.dfs_pool_stats.p)) static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { int retval = 0, len = 0; const int size = 8000; struct ath10k *ar = file->private_data; char *buf; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; if (!ar->dfs_detector) { len += scnprintf(buf + len, size - len, "DFS not enabled\n"); goto exit; } ar->debug.dfs_pool_stats = ar->dfs_detector->get_stats(ar->dfs_detector); len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); ATH10K_DFS_STAT("reported phy errors", phy_errors); ATH10K_DFS_STAT("pulse events reported", pulses_total); ATH10K_DFS_STAT("DFS pulses detected", pulses_detected); ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded); ATH10K_DFS_STAT("Radars detected", radar_detected); len += scnprintf(buf + len, size - len, "Global Pool statistics:\n"); ATH10K_DFS_POOL_STAT("Pool references", pool_reference); ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated); ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error); ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used); ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated); ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error); ATH10K_DFS_POOL_STAT("Seqs. 
in use", pseq_used); exit: if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_dfs_stats = { .read = ath10k_read_dfs_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_write_pktlog_filter(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 filter; int ret; if (kstrtouint_from_user(ubuf, count, 0, &filter)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ar->pktlog_filter = filter; ret = count; goto out; } if (filter == ar->pktlog_filter) { ret = count; goto out; } if (filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); if (ret) { ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", ar->pktlog_filter, ret); goto out; } } else { ret = ath10k_wmi_pdev_pktlog_disable(ar); if (ret) { ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); goto out; } } ar->pktlog_filter = filter; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x\n", ar->pktlog_filter); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_pktlog_filter = { .read = ath10k_read_pktlog_filter, .write = ath10k_write_pktlog_filter, .open = simple_open }; static ssize_t ath10k_write_quiet_period(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 period; if (kstrtouint_from_user(ubuf, count, 0, &period)) return -EINVAL; if (period < ATH10K_QUIET_PERIOD_MIN) { ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n", period); return -EINVAL; } mutex_lock(&ar->conf_mutex); ar->thermal.quiet_period = period; ath10k_thermal_set_throttling(ar); mutex_unlock(&ar->conf_mutex); return count; } static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", ar->thermal.quiet_period); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_quiet_period = { .read = ath10k_read_quiet_period, .write = ath10k_write_quiet_period, .open = simple_open }; static ssize_t ath10k_write_btcoex(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; ssize_t ret; bool val; u32 pdev_param; ret = kstrtobool_from_user(ubuf, count, &val); if (ret) return ret; if (!ar->coex_support) return -EOPNOTSUPP; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) { ret = count; goto exit; } pdev_param = ar->wmi.pdev_param->enable_btcoex; if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, ar->running_fw->fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); if (ret) { ath10k_warn(ar, "failed to enable btcoex: %zd\n", ret); ret = count; goto exit; } } else { ath10k_info(ar, "restarting firmware due to btcoex change"); 
ath10k_core_start_recovery(ar); } if (val) set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); else clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags)); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_btcoex = { .read = ath10k_read_btcoex, .write = ath10k_write_btcoex, .open = simple_open }; static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 filter; int ret; if (kstrtouint_from_user(ubuf, count, 0, &filter)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ar->debug.enable_extd_tx_stats = filter; ret = count; goto out; } if (filter == ar->debug.enable_extd_tx_stats) { ret = count; goto out; } ar->debug.enable_extd_tx_stats = filter; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x\n", ar->debug.enable_extd_tx_stats); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_enable_extd_tx_stats = { .read = ath10k_read_enable_extd_tx_stats, .write = ath10k_write_enable_extd_tx_stats, .open = simple_open }; static ssize_t ath10k_write_peer_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; ssize_t ret; bool val; ret = kstrtobool_from_user(ubuf, count, &val); if (ret) return ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) { ret = count; goto exit; } if (val) set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); else clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); ath10k_info(ar, "restarting firmware due to Peer stats change"); ath10k_core_start_recovery(ar); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags)); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_peer_stats = { .read = ath10k_read_peer_stats, .write = ath10k_write_peer_stats, .open = simple_open }; static ssize_t ath10k_debug_fw_checksums_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len = 0, buf_len = 4096; ssize_t ret_cnt; char *buf; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&ar->conf_mutex); len += scnprintf(buf + len, buf_len - len, "firmware-N.bin\t\t%08x\n", crc32_le(0, 
ar->normal_mode_fw.fw_file.firmware->data, ar->normal_mode_fw.fw_file.firmware->size)); len += scnprintf(buf + len, buf_len - len, "athwlan\t\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data, ar->normal_mode_fw.fw_file.firmware_len)); len += scnprintf(buf + len, buf_len - len, "otp\t\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.fw_file.otp_data, ar->normal_mode_fw.fw_file.otp_len)); len += scnprintf(buf + len, buf_len - len, "codeswap\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data, ar->normal_mode_fw.fw_file.codeswap_len)); len += scnprintf(buf + len, buf_len - len, "board-N.bin\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.board->data, ar->normal_mode_fw.board->size)); len += scnprintf(buf + len, buf_len - len, "board\t\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.board_data, ar->normal_mode_fw.board_len)); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); mutex_unlock(&ar->conf_mutex); kfree(buf); return ret_cnt; } static const struct file_operations fops_fw_checksums = { .read = ath10k_debug_fw_checksums_read, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_sta_tid_stats_mask_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32]; size_t len; len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->sta_tid_stats_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; ssize_t ret; u32 mask; ret = kstrtoint_from_user(user_buf, count, 0, &mask); if (ret) return ret; ar->sta_tid_stats_mask = mask; return count; } static const struct file_operations fops_sta_tid_stats_mask = { .read = ath10k_sta_tid_stats_mask_read, .write = ath10k_sta_tid_stats_mask_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath10k_debug_tpc_stats_final_request(struct ath10k *ar) { int ret; unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->debug.tpc_complete); ret = ath10k_wmi_pdev_get_tpc_table_cmdid(ar, WMI_TPC_CONFIG_PARAM); if (ret) { ath10k_warn(ar, "failed to request tpc table cmdid: %d\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, 1 * HZ); if (time_left == 0) return -ETIMEDOUT; return 0; } static int ath10k_tpc_stats_final_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; void *buf; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } ret = ath10k_debug_tpc_stats_final_request(ar); if (ret) { ath10k_warn(ar, "failed to request tpc stats final: %d\n", ret); goto err_free; } ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_tpc_stats_final_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath10k_tpc_stats_final_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; unsigned int len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations 
fops_tpc_stats_final = { .open = ath10k_tpc_stats_final_open, .release = ath10k_tpc_stats_final_release, .read = ath10k_tpc_stats_final_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_write_warm_hw_reset(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; bool val; if (kstrtobool_from_user(user_buf, count, &val)) return -EFAULT; if (!val) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto exit; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset, WMI_RST_MODE_WARM_RESET); if (ret) { ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret); goto exit; } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_warm_hw_reset = { .write = ath10k_write_warm_hw_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath10k_peer_ps_state_disable(void *data, struct ieee80211_sta *sta) { struct ath10k *ar = data; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; spin_lock_bh(&ar->data_lock); arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; spin_unlock_bh(&ar->data_lock); } static ssize_t ath10k_write_ps_state_enable(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; u32 param; u8 ps_state_enable; if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable)) return -EINVAL; if (ps_state_enable > 1) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->ps_state_enable == ps_state_enable) { ret = count; goto exit; } param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable; ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable); if (ret) { ath10k_warn(ar, "failed to enable ps_state_enable: %d\n", ret); goto exit; } ar->ps_state_enable = ps_state_enable; if (!ar->ps_state_enable) ieee80211_iterate_stations_atomic(ar->hw, ath10k_peer_ps_state_disable, ar); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_ps_state_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int len = 0; char buf[32]; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", ar->ps_state_enable); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_ps_state_enable = { .read = ath10k_read_ps_state_enable, .write = ath10k_write_ps_state_enable, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_write_reset_htt_stats(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long reset; int ret; ret = kstrtoul_from_user(user_buf, count, 0, &reset); if (ret) return ret; if (reset == 0 || reset > 0x1ffff) return -EINVAL; mutex_lock(&ar->conf_mutex); ar->debug.reset_htt_stats = reset; ret = ath10k_debug_htt_stats_req(ar); if (ret) goto out; ar->debug.reset_htt_stats = 0; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_reset_htt_stats = { .write = ath10k_write_reset_htt_stats, .owner = THIS_MODULE, .open = simple_open, .llseek = default_llseek, }; int ath10k_debug_create(struct ath10k *ar) { ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); if (!ar->debug.cal_data) return -ENOMEM; 
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.peers); INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd); return 0; } void ath10k_debug_destroy(struct ath10k *ar) { vfree(ar->debug.cal_data); ar->debug.cal_data = NULL; ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); kfree(ar->debug.tpc_stats_final); } int ath10k_debug_register(struct ath10k *ar) { ar->debug.debugfs_phy = debugfs_create_dir("ath10k", ar->hw->wiphy->debugfsdir); if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) { if (IS_ERR(ar->debug.debugfs_phy)) return PTR_ERR(ar->debug.debugfs_phy); return -ENOMEM; } INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_dwork); init_completion(&ar->debug.tpc_complete); init_completion(&ar->debug.fw_stats_complete); debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar, &fops_fw_stats); debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar, &fops_fw_reset_stats); debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar, &fops_wmi_services); debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar, &fops_reg_addr); debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar, &fops_reg_value); debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar, &fops_mem_value); debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar, &fops_chip_id); debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar, &fops_htt_stats_mask); debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar, &fops_htt_max_amsdu_ampdu); debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, &fops_fw_dbglog); if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) { debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, &fops_cal_data); debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, &fops_nf_cal_period); } debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ani_enable); if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, ar, &fops_simulate_radar); debugfs_create_bool("dfs_block_radar_events", 0200, ar->debug.debugfs_phy, &ar->dfs_block_radar_events); debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar, &fops_dfs_stats); } debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar, &fops_pktlog_filter); if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map)) debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar, &fops_quiet_period); debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar, &fops_tpc_stats); if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar, &fops_btcoex); if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar, &fops_peer_stats); debugfs_create_file("enable_extd_tx_stats", 0644, ar->debug.debugfs_phy, ar, &fops_enable_extd_tx_stats); } debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar, &fops_fw_checksums); if (IS_ENABLED(CONFIG_MAC80211_DEBUGFS)) debugfs_create_file("sta_tid_stats_mask", 0600, ar->debug.debugfs_phy, ar, &fops_sta_tid_stats_mask); if (test_bit(WMI_SERVICE_TPC_STATS_FINAL, ar->wmi.svc_map)) debugfs_create_file("tpc_stats_final", 0400, ar->debug.debugfs_phy, ar, 
&fops_tpc_stats_final); if (test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)) debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar, &fops_warm_hw_reset); debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ps_state_enable); debugfs_create_file("reset_htt_stats", 0200, ar->debug.debugfs_phy, ar, &fops_reset_htt_stats); return 0; } void ath10k_debug_unregister(struct ath10k *ar) { cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); } #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG void __ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (ath10k_debug_mask & mask) dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf); trace_ath10k_log_dbg(ar, mask, &vaf); va_end(args); } EXPORT_SYMBOL(__ath10k_dbg); void ath10k_dbg_dump(struct ath10k *ar, enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { char linebuf[256]; size_t linebuflen; const void *ptr; if (ath10k_debug_mask & mask) { if (msg) __ath10k_dbg(ar, mask, "%s\n", msg); for (ptr = buf; (ptr - buf) < len; ptr += 16) { linebuflen = 0; linebuflen += scnprintf(linebuf + linebuflen, sizeof(linebuf) - linebuflen, "%s%08x: ", (prefix ? prefix : ""), (unsigned int)(ptr - buf)); hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, linebuf + linebuflen, sizeof(linebuf) - linebuflen, true); dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf); } } /* tracing code doesn't like null strings :/ */ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "", buf, len); } EXPORT_SYMBOL(ath10k_dbg_dump); #endif /* CONFIG_ATH10K_DEBUG */
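/*
 * Editor's note: the ath10k debugfs handlers above repeat one pattern --
 * reads format driver state into a small stack buffer (under a lock where
 * needed) and hand it to simple_read_from_buffer(), while writes parse the
 * user buffer with kstrtoX_from_user() and apply the value under the
 * configuration mutex. The sketch below is a minimal, self-contained version
 * of that pattern for a hypothetical driver. The names (example_priv,
 * example_threshold_read/write, fops_example_threshold,
 * example_debugfs_register) are assumptions made for illustration and do not
 * exist in ath10k or elsewhere in the kernel.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_priv {
	struct mutex lock;		/* protects threshold */
	u32 threshold;
	struct dentry *debugfs_dir;
};

static ssize_t example_threshold_read(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct example_priv *priv = file->private_data;
	char buf[32];
	size_t len;

	/* Snapshot the value under the lock, then copy it out unlocked. */
	mutex_lock(&priv->lock);
	len = scnprintf(buf, sizeof(buf), "%u\n", priv->threshold);
	mutex_unlock(&priv->lock);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t example_threshold_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct example_priv *priv = file->private_data;
	u32 val;
	int ret;

	ret = kstrtou32_from_user(user_buf, count, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&priv->lock);
	priv->threshold = val;
	mutex_unlock(&priv->lock);

	return count;
}

static const struct file_operations fops_example_threshold = {
	.read = example_threshold_read,
	.write = example_threshold_write,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* Registration, typically done once at probe time (hypothetical helper). */
static void example_debugfs_register(struct example_priv *priv)
{
	mutex_init(&priv->lock);
	priv->debugfs_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("threshold", 0600, priv->debugfs_dir, priv,
			    &fops_example_threshold);
}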
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID driver for some cypress "special" devices
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2007 Jiri Kosina
 * Copyright (c) 2008 Jiri Slaby
 */

/*
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/module.h>

#include "hid-ids.h"

#define CP_RDESC_SWAPPED_MIN_MAX	0x01
#define CP_2WHEEL_MOUSE_HACK		0x02
#define CP_2WHEEL_MOUSE_HACK_ON		0x04
#define VA_INVAL_LOGICAL_BOUNDARY	0x08

/*
 * Some USB barcode readers from cypress have usage min and usage max in
 * the wrong order
 */
static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	unsigned int i;

	if (*rsize < 4)
		return rdesc;

	for (i = 0; i < *rsize - 4; i++)
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			rdesc[i] = 0x19;
			rdesc[i + 2] = 0x29;
			swap(rdesc[i + 3], rdesc[i + 1]);
		}

	return rdesc;
}

static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
				       unsigned int *rsize)
{
	/*
	 * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
	 * reports Logical Minimum of its Consumer Control device as 572
	 * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
	 */
	if (*rsize == 25 &&
	    rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
	    rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
	    rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
	    rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
		hid_info(hdev,
			 "fixing up varmilo VA104M consumer control report descriptor\n");
		rdesc[12] = 0x00;
		rdesc[13] = 0x00;
	}

	return rdesc;
}

static const __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				   unsigned int *rsize)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
		rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
	if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
		rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);

	return rdesc;
}

static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if (!(quirks & CP_2WHEEL_MOUSE_HACK))
		return 0;

	if (usage->type == EV_REL && usage->code == REL_WHEEL)
		set_bit(REL_HWHEEL, *bit);
	if (usage->hid == 0x00090005)
		return -1;

	return 0;
}

static int cp_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type || !(quirks & CP_2WHEEL_MOUSE_HACK))
		return 0;

	if (usage->hid == 0x00090005) {
		if (value)
			quirks |= CP_2WHEEL_MOUSE_HACK_ON;
		else
			quirks &= ~CP_2WHEEL_MOUSE_HACK_ON;
		hid_set_drvdata(hdev, (void *)quirks);
		return 1;
	}

	if (usage->code == REL_WHEEL && (quirks & CP_2WHEEL_MOUSE_HACK_ON)) {
		struct input_dev *input = field->hidinput->input;

		input_event(input, usage->type, REL_HWHEEL, value);
		return 1;
	}

	return 0;
}

static int cp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	unsigned long quirks = id->driver_data;
	int ret;

	hid_set_drvdata(hdev, (void *)quirks);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err_free;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err_free;
	}

	return 0;
err_free:
	return ret;
}

static const struct hid_device_id cp_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
		.driver_data = CP_2WHEEL_MOUSE_HACK },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
		.driver_data = VA_INVAL_LOGICAL_BOUNDARY },
	{ }
};
MODULE_DEVICE_TABLE(hid, cp_devices);

static struct hid_driver cp_driver = {
	.name = "cypress",
	.id_table = cp_devices,
	.report_fixup = cp_report_fixup,
	.input_mapped = cp_input_mapped,
	.event = cp_event,
	.probe = cp_probe,
};
module_hid_driver(cp_driver);

MODULE_DESCRIPTION("HID driver for some cypress \"special\" devices");
MODULE_LICENSE("GPL");
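/*
 * Editor's note: cp_rdesc_fixup() above patches HID report descriptors in
 * which Usage Maximum (item tag 0x29) is emitted before Usage Minimum (item
 * tag 0x19) by swapping both the tags and their data bytes. The stand-alone
 * user-space sketch below applies the same transformation to a made-up
 * descriptor fragment so the effect is easy to see; the sample bytes and the
 * helper name fixup_swapped_min_max() are illustrative assumptions, not taken
 * from a real Cypress device or from the driver itself.
 */
#include <stdio.h>
#include <stddef.h>

static void fixup_swapped_min_max(unsigned char *rdesc, size_t rsize)
{
	size_t i;

	if (rsize < 4)
		return;

	for (i = 0; i < rsize - 4; i++) {
		/* "Usage Maximum, <n>" followed two bytes later by "Usage Minimum, <m>" */
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			unsigned char tmp;

			rdesc[i] = 0x19;		/* becomes Usage Minimum */
			rdesc[i + 2] = 0x29;		/* becomes Usage Maximum */
			tmp = rdesc[i + 1];		/* swap the data bytes too */
			rdesc[i + 1] = rdesc[i + 3];
			rdesc[i + 3] = tmp;
		}
	}
}

int main(void)
{
	/* Hypothetical fragment: Usage Maximum (0x2c) before Usage Minimum (0x01). */
	unsigned char frag[] = { 0x29, 0x2c, 0x19, 0x01, 0x00 };
	size_t i;

	fixup_swapped_min_max(frag, sizeof(frag));

	for (i = 0; i < sizeof(frag); i++)
		printf("%02x ", frag[i]);
	printf("\n");	/* prints: 19 01 29 2c 00 */

	return 0;
}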
// SPDX-License-Identifier: GPL-2.0-or-later /* Linux driver for Philips webcam USB and Video4Linux interface part. (C) 1999-2004 Nemosoft Unv. (C) 2004-2006 Luc Saillard (luc@saillard.org) (C) 2011 Hans de Goede <hdegoede@redhat.com> NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/jiffies.h> #include <asm/io.h> #include "pwc.h" #define PWC_CID_CUSTOM(ctrl) ((V4L2_CID_USER_BASE | 0xf000) + custom_ ## ctrl) static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl); static int pwc_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops pwc_ctrl_ops = { .g_volatile_ctrl = pwc_g_volatile_ctrl, .s_ctrl = pwc_s_ctrl, }; enum { awb_indoor, awb_outdoor, awb_fl, awb_manual, awb_auto }; enum { custom_autocontour, custom_contour, custom_noise_reduction, custom_awb_speed, custom_awb_delay, custom_save_user, custom_restore_user, custom_restore_factory }; static const char * const pwc_auto_whitebal_qmenu[] = { "Indoor (Incandescent Lighting) Mode", "Outdoor (Sunlight) Mode", "Indoor (Fluorescent Lighting) Mode", "Manual Mode", "Auto Mode", NULL }; static const struct v4l2_ctrl_config pwc_auto_white_balance_cfg = { .ops = &pwc_ctrl_ops, .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_MENU, .max = awb_auto, .qmenu = pwc_auto_whitebal_qmenu, }; static const struct v4l2_ctrl_config pwc_autocontour_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(autocontour), .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto contour", .min = 0, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config pwc_contour_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(contour), .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contour", .flags = V4L2_CTRL_FLAG_SLIDER, .min = 0, .max = 63, .step = 1, }; static const struct v4l2_ctrl_config pwc_backlight_cfg = { .ops = &pwc_ctrl_ops, .id = V4L2_CID_BACKLIGHT_COMPENSATION, .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config pwc_flicker_cfg = { .ops = &pwc_ctrl_ops, .id = V4L2_CID_BAND_STOP_FILTER, .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config pwc_noise_reduction_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(noise_reduction), .type = V4L2_CTRL_TYPE_INTEGER, .name = "Dynamic Noise Reduction", .min = 0, .max = 3, .step = 1, }; static const struct v4l2_ctrl_config pwc_save_user_cfg = { .ops = &pwc_ctrl_ops, .id
= PWC_CID_CUSTOM(save_user), .type = V4L2_CTRL_TYPE_BUTTON, .name = "Save User Settings", }; static const struct v4l2_ctrl_config pwc_restore_user_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(restore_user), .type = V4L2_CTRL_TYPE_BUTTON, .name = "Restore User Settings", }; static const struct v4l2_ctrl_config pwc_restore_factory_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(restore_factory), .type = V4L2_CTRL_TYPE_BUTTON, .name = "Restore Factory Settings", }; static const struct v4l2_ctrl_config pwc_awb_speed_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(awb_speed), .type = V4L2_CTRL_TYPE_INTEGER, .name = "Auto White Balance Speed", .min = 1, .max = 32, .step = 1, }; static const struct v4l2_ctrl_config pwc_awb_delay_cfg = { .ops = &pwc_ctrl_ops, .id = PWC_CID_CUSTOM(awb_delay), .type = V4L2_CTRL_TYPE_INTEGER, .name = "Auto White Balance Delay", .min = 0, .max = 63, .step = 1, }; int pwc_init_controls(struct pwc_device *pdev) { struct v4l2_ctrl_handler *hdl; struct v4l2_ctrl_config cfg; int r, def; hdl = &pdev->ctrl_handler; r = v4l2_ctrl_handler_init(hdl, 20); if (r) return r; /* Brightness, contrast, saturation, gamma */ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, BRIGHTNESS_FORMATTER, &def); if (r || def > 127) def = 63; pdev->brightness = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 127, 1, def); r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, CONTRAST_FORMATTER, &def); if (r || def > 63) def = 31; pdev->contrast = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_CONTRAST, 0, 63, 1, def); if (pdev->type >= 675) { if (pdev->type < 730) pdev->saturation_fmt = SATURATION_MODE_FORMATTER2; else pdev->saturation_fmt = SATURATION_MODE_FORMATTER1; r = pwc_get_s8_ctrl(pdev, GET_CHROM_CTL, pdev->saturation_fmt, &def); if (r || def < -100 || def > 100) def = 0; pdev->saturation = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_SATURATION, -100, 100, 1, def); } r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, GAMMA_FORMATTER, &def); if (r || def > 31) def = 15; pdev->gamma = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_GAMMA, 0, 31, 1, def); /* auto white balance, red gain, blue gain */ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, WB_MODE_FORMATTER, &def); if (r || def > awb_auto) def = awb_auto; cfg = pwc_auto_white_balance_cfg; cfg.name = v4l2_ctrl_get_name(cfg.id); cfg.def = def; pdev->auto_white_balance = v4l2_ctrl_new_custom(hdl, &cfg, NULL); /* check auto controls to avoid NULL deref in v4l2_ctrl_auto_cluster */ if (!pdev->auto_white_balance) return hdl->error; r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER, &def); if (r) def = 127; pdev->red_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 255, 1, def); r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER, &def); if (r) def = 127; pdev->blue_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 255, 1, def); v4l2_ctrl_auto_cluster(3, &pdev->auto_white_balance, awb_manual, true); /* autogain, gain */ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AGC_MODE_FORMATTER, &def); if (r || (def != 0 && def != 0xff)) def = 0; /* Note a register value if 0 means auto gain is on */ pdev->autogain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, def == 0); if (!pdev->autogain) return hdl->error; r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_AGC_FORMATTER, &def); if (r || def > 63) def = 31; pdev->gain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_GAIN, 0, 63, 1, def); /* auto exposure, exposure */ if (DEVICE_USE_CODEC2(pdev->type)) { r = 
pwc_get_u8_ctrl(pdev, GET_LUM_CTL, SHUTTER_MODE_FORMATTER, &def); if (r || (def != 0 && def != 0xff)) def = 0; /* * def = 0 auto, def = ff manual * menu idx 0 = auto, idx 1 = manual */ pdev->exposure_auto = v4l2_ctrl_new_std_menu(hdl, &pwc_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, def != 0); if (!pdev->exposure_auto) return hdl->error; /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */ r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &def); if (r || def > 655) def = 655; pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_EXPOSURE, 0, 655, 1, def); /* CODEC2: separate auto gain & auto exposure */ v4l2_ctrl_auto_cluster(2, &pdev->autogain, 0, true); v4l2_ctrl_auto_cluster(2, &pdev->exposure_auto, V4L2_EXPOSURE_MANUAL, true); } else if (DEVICE_USE_CODEC3(pdev->type)) { /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */ r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &def); if (r || def > 255) def = 255; pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_EXPOSURE, 0, 255, 1, def); /* CODEC3: both gain and exposure controlled by autogain */ pdev->autogain_expo_cluster[0] = pdev->autogain; pdev->autogain_expo_cluster[1] = pdev->gain; pdev->autogain_expo_cluster[2] = pdev->exposure; v4l2_ctrl_auto_cluster(3, pdev->autogain_expo_cluster, 0, true); } /* color / bw setting */ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, COLOUR_MODE_FORMATTER, &def); if (r || (def != 0 && def != 0xff)) def = 0xff; /* def = 0 bw, def = ff color, menu idx 0 = color, idx 1 = bw */ pdev->colorfx = v4l2_ctrl_new_std_menu(hdl, &pwc_ctrl_ops, V4L2_CID_COLORFX, 1, 0, def == 0); /* autocontour, contour */ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &def); if (r || (def != 0 && def != 0xff)) def = 0; cfg = pwc_autocontour_cfg; cfg.def = def == 0; pdev->autocontour = v4l2_ctrl_new_custom(hdl, &cfg, NULL); if (!pdev->autocontour) return hdl->error; r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &def); if (r || def > 63) def = 31; cfg = pwc_contour_cfg; cfg.def = def; pdev->contour = v4l2_ctrl_new_custom(hdl, &cfg, NULL); v4l2_ctrl_auto_cluster(2, &pdev->autocontour, 0, false); /* backlight */ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER, &def); if (r || (def != 0 && def != 0xff)) def = 0; cfg = pwc_backlight_cfg; cfg.name = v4l2_ctrl_get_name(cfg.id); cfg.def = def == 0; pdev->backlight = v4l2_ctrl_new_custom(hdl, &cfg, NULL); /* flicker reduction */ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &def); if (r || (def != 0 && def != 0xff)) def = 0; cfg = pwc_flicker_cfg; cfg.name = v4l2_ctrl_get_name(cfg.id); cfg.def = def == 0; pdev->flicker = v4l2_ctrl_new_custom(hdl, &cfg, NULL); /* Dynamic noise reduction */ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER, &def); if (r || def > 3) def = 2; cfg = pwc_noise_reduction_cfg; cfg.def = def; pdev->noise_reduction = v4l2_ctrl_new_custom(hdl, &cfg, NULL); /* Save / Restore User / Factory Settings */ pdev->save_user = v4l2_ctrl_new_custom(hdl, &pwc_save_user_cfg, NULL); pdev->restore_user = v4l2_ctrl_new_custom(hdl, &pwc_restore_user_cfg, NULL); if (pdev->restore_user) pdev->restore_user->flags |= V4L2_CTRL_FLAG_UPDATE; pdev->restore_factory = v4l2_ctrl_new_custom(hdl, &pwc_restore_factory_cfg, NULL); if (pdev->restore_factory) pdev->restore_factory->flags |= V4L2_CTRL_FLAG_UPDATE; /* Auto White Balance speed & delay */ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, AWB_CONTROL_SPEED_FORMATTER, &def); if (r || def <
1 || def > 32) def = 1; cfg = pwc_awb_speed_cfg; cfg.def = def; pdev->awb_speed = v4l2_ctrl_new_custom(hdl, &cfg, NULL); r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, AWB_CONTROL_DELAY_FORMATTER, &def); if (r || def > 63) def = 0; cfg = pwc_awb_delay_cfg; cfg.def = def; pdev->awb_delay = v4l2_ctrl_new_custom(hdl, &cfg, NULL); if (!(pdev->features & FEATURE_MOTOR_PANTILT)) return hdl->error; /* Motor pan / tilt / reset */ pdev->motor_pan = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_PAN_RELATIVE, -4480, 4480, 64, 0); if (!pdev->motor_pan) return hdl->error; pdev->motor_tilt = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_TILT_RELATIVE, -1920, 1920, 64, 0); pdev->motor_pan_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_PAN_RESET, 0, 0, 0, 0); pdev->motor_tilt_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops, V4L2_CID_TILT_RESET, 0, 0, 0, 0); v4l2_ctrl_cluster(4, &pdev->motor_pan); return hdl->error; } static void pwc_vidioc_fill_fmt(struct v4l2_format *f, int width, int height, u32 pixfmt) { memset(&f->fmt.pix, 0, sizeof(struct v4l2_pix_format)); f->fmt.pix.width = width; f->fmt.pix.height = height; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.pixelformat = pixfmt; f->fmt.pix.bytesperline = f->fmt.pix.width; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.width * 3 / 2; f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n", f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.bytesperline, f->fmt.pix.sizeimage, (f->fmt.pix.pixelformat)&255, (f->fmt.pix.pixelformat>>8)&255, (f->fmt.pix.pixelformat>>16)&255, (f->fmt.pix.pixelformat>>24)&255); } /* ioctl(VIDIOC_TRY_FMT) */ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f) { int size; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { PWC_DEBUG_IOCTL("Bad video type must be V4L2_BUF_TYPE_VIDEO_CAPTURE\n"); return -EINVAL; } switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_YUV420: break; case V4L2_PIX_FMT_PWC1: if (DEVICE_USE_CODEC23(pdev->type)) { PWC_DEBUG_IOCTL("codec1 is only supported for old pwc webcam\n"); f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420; } break; case V4L2_PIX_FMT_PWC2: if (DEVICE_USE_CODEC1(pdev->type)) { PWC_DEBUG_IOCTL("codec23 is only supported for new pwc webcam\n"); f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420; } break; default: PWC_DEBUG_IOCTL("Unsupported pixel format\n"); f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420; } size = pwc_get_size(pdev, f->fmt.pix.width, f->fmt.pix.height); pwc_vidioc_fill_fmt(f, pwc_image_sizes[size][0], pwc_image_sizes[size][1], f->fmt.pix.pixelformat); return 0; } /* ioctl(VIDIOC_SET_FMT) */ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct pwc_device *pdev = video_drvdata(file); int ret, pixelformat, compression = 0; ret = pwc_vidioc_try_fmt(pdev, f); if (ret < 0) return ret; if (vb2_is_busy(&pdev->vb_queue)) return -EBUSY; pixelformat = f->fmt.pix.pixelformat; PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d format=%c%c%c%c\n", f->fmt.pix.width, f->fmt.pix.height, pdev->vframes, (pixelformat)&255, (pixelformat>>8)&255, (pixelformat>>16)&255, (pixelformat>>24)&255); ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height, pixelformat, 30, &compression, 0); PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret); pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt); return ret; } static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct 
pwc_device *pdev = video_drvdata(file); strscpy(cap->driver, PWC_NAME, sizeof(cap->driver)); strscpy(cap->card, pdev->vdev.name, sizeof(cap->card)); usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int pwc_enum_input(struct file *file, void *fh, struct v4l2_input *i) { if (i->index) /* Only one INPUT is supported */ return -EINVAL; strscpy(i->name, "Camera", sizeof(i->name)); i->type = V4L2_INPUT_TYPE_CAMERA; return 0; } static int pwc_g_input(struct file *file, void *fh, unsigned int *i) { *i = 0; return 0; } static int pwc_s_input(struct file *file, void *fh, unsigned int i) { return i ? -EINVAL : 0; } static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct pwc_device *pdev = container_of(ctrl->handler, struct pwc_device, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: if (pdev->color_bal_valid && (pdev->auto_white_balance->val != awb_auto || time_before(jiffies, pdev->last_color_bal_update + HZ / 4))) { pdev->red_balance->val = pdev->last_red_balance; pdev->blue_balance->val = pdev->last_blue_balance; break; } ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL, READ_RED_GAIN_FORMATTER, &pdev->red_balance->val); if (ret) break; ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL, READ_BLUE_GAIN_FORMATTER, &pdev->blue_balance->val); if (ret) break; pdev->last_red_balance = pdev->red_balance->val; pdev->last_blue_balance = pdev->blue_balance->val; pdev->last_color_bal_update = jiffies; pdev->color_bal_valid = true; break; case V4L2_CID_AUTOGAIN: if (pdev->gain_valid && time_before(jiffies, pdev->last_gain_update + HZ / 4)) { pdev->gain->val = pdev->last_gain; break; } ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL, READ_AGC_FORMATTER, &pdev->gain->val); if (ret) break; pdev->last_gain = pdev->gain->val; pdev->last_gain_update = jiffies; pdev->gain_valid = true; if (!DEVICE_USE_CODEC3(pdev->type)) break; /* For CODEC3 where autogain also controls expo */ fallthrough; case V4L2_CID_EXPOSURE_AUTO: if (pdev->exposure_valid && time_before(jiffies, pdev->last_exposure_update + HZ / 4)) { pdev->exposure->val = pdev->last_exposure; break; } ret = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &pdev->exposure->val); if (ret) break; pdev->last_exposure = pdev->exposure->val; pdev->last_exposure_update = jiffies; pdev->exposure_valid = true; break; default: ret = -EINVAL; } if (ret) PWC_ERROR("g_ctrl %s error %d\n", ctrl->name, ret); return ret; } static int pwc_set_awb(struct pwc_device *pdev) { int ret; if (pdev->auto_white_balance->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL, WB_MODE_FORMATTER, pdev->auto_white_balance->val); if (ret) return ret; if (pdev->auto_white_balance->val != awb_manual) pdev->color_bal_valid = false; /* Force cache update */ /* * If this is a preset, update our red / blue balance values * so that events get generated for the new preset values */ if (pdev->auto_white_balance->val == awb_indoor || pdev->auto_white_balance->val == awb_outdoor || pdev->auto_white_balance->val == awb_fl) pwc_g_volatile_ctrl(pdev->auto_white_balance); } if (pdev->auto_white_balance->val != awb_manual) return 0; if (pdev->red_balance->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER, pdev->red_balance->val); if (ret) return ret; } if (pdev->blue_balance->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER, pdev->blue_balance->val); if (ret) return ret; } return 0; } /* For CODEC2 models which have separate autogain and auto exposure */ static int 
pwc_set_autogain(struct pwc_device *pdev) { int ret; if (pdev->autogain->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, AGC_MODE_FORMATTER, pdev->autogain->val ? 0 : 0xff); if (ret) return ret; if (pdev->autogain->val) pdev->gain_valid = false; /* Force cache update */ } if (pdev->autogain->val) return 0; if (pdev->gain->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, PRESET_AGC_FORMATTER, pdev->gain->val); if (ret) return ret; } return 0; } /* For CODEC2 models which have separate autogain and auto exposure */ static int pwc_set_exposure_auto(struct pwc_device *pdev) { int ret; int is_auto = pdev->exposure_auto->val == V4L2_EXPOSURE_AUTO; if (pdev->exposure_auto->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, SHUTTER_MODE_FORMATTER, is_auto ? 0 : 0xff); if (ret) return ret; if (is_auto) pdev->exposure_valid = false; /* Force cache update */ } if (is_auto) return 0; if (pdev->exposure->is_new) { ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL, PRESET_SHUTTER_FORMATTER, pdev->exposure->val); if (ret) return ret; } return 0; } /* For CODEC3 models which have autogain controlling both gain and exposure */ static int pwc_set_autogain_expo(struct pwc_device *pdev) { int ret; if (pdev->autogain->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, AGC_MODE_FORMATTER, pdev->autogain->val ? 0 : 0xff); if (ret) return ret; if (pdev->autogain->val) { pdev->gain_valid = false; /* Force cache update */ pdev->exposure_valid = false; /* Force cache update */ } } if (pdev->autogain->val) return 0; if (pdev->gain->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, PRESET_AGC_FORMATTER, pdev->gain->val); if (ret) return ret; } if (pdev->exposure->is_new) { ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL, PRESET_SHUTTER_FORMATTER, pdev->exposure->val); if (ret) return ret; } return 0; } static int pwc_set_motor(struct pwc_device *pdev) { int ret; pdev->ctrl_buf[0] = 0; if (pdev->motor_pan_reset->is_new) pdev->ctrl_buf[0] |= 0x01; if (pdev->motor_tilt_reset->is_new) pdev->ctrl_buf[0] |= 0x02; if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) { ret = send_control_msg(pdev, SET_MPT_CTL, PT_RESET_CONTROL_FORMATTER, pdev->ctrl_buf, 1); if (ret < 0) return ret; } memset(pdev->ctrl_buf, 0, 4); if (pdev->motor_pan->is_new) { pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF; pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8); } if (pdev->motor_tilt->is_new) { pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF; pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8); } if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) { ret = send_control_msg(pdev, SET_MPT_CTL, PT_RELATIVE_CONTROL_FORMATTER, pdev->ctrl_buf, 4); if (ret < 0) return ret; } return 0; } static int pwc_s_ctrl(struct v4l2_ctrl *ctrl) { struct pwc_device *pdev = container_of(ctrl->handler, struct pwc_device, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, BRIGHTNESS_FORMATTER, ctrl->val); break; case V4L2_CID_CONTRAST: ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, CONTRAST_FORMATTER, ctrl->val); break; case V4L2_CID_SATURATION: ret = pwc_set_s8_ctrl(pdev, SET_CHROM_CTL, pdev->saturation_fmt, ctrl->val); break; case V4L2_CID_GAMMA: ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, GAMMA_FORMATTER, ctrl->val); break; case V4L2_CID_AUTO_WHITE_BALANCE: ret = pwc_set_awb(pdev); break; case V4L2_CID_AUTOGAIN: if (DEVICE_USE_CODEC2(pdev->type)) ret = pwc_set_autogain(pdev); else if (DEVICE_USE_CODEC3(pdev->type)) ret = pwc_set_autogain_expo(pdev); else ret = -EINVAL; break; case V4L2_CID_EXPOSURE_AUTO: 
if (DEVICE_USE_CODEC2(pdev->type)) ret = pwc_set_exposure_auto(pdev); else ret = -EINVAL; break; case V4L2_CID_COLORFX: ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL, COLOUR_MODE_FORMATTER, ctrl->val ? 0 : 0xff); break; case PWC_CID_CUSTOM(autocontour): if (pdev->autocontour->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, AUTO_CONTOUR_FORMATTER, pdev->autocontour->val ? 0 : 0xff); } if (ret == 0 && pdev->contour->is_new) { ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, PRESET_CONTOUR_FORMATTER, pdev->contour->val); } break; case V4L2_CID_BACKLIGHT_COMPENSATION: ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER, ctrl->val ? 0 : 0xff); break; case V4L2_CID_BAND_STOP_FILTER: ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, ctrl->val ? 0 : 0xff); break; case PWC_CID_CUSTOM(noise_reduction): ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER, ctrl->val); break; case PWC_CID_CUSTOM(save_user): ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER); break; case PWC_CID_CUSTOM(restore_user): ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER); break; case PWC_CID_CUSTOM(restore_factory): ret = pwc_button_ctrl(pdev, RESTORE_FACTORY_DEFAULTS_FORMATTER); break; case PWC_CID_CUSTOM(awb_speed): ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL, AWB_CONTROL_SPEED_FORMATTER, ctrl->val); break; case PWC_CID_CUSTOM(awb_delay): ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL, AWB_CONTROL_DELAY_FORMATTER, ctrl->val); break; case V4L2_CID_PAN_RELATIVE: ret = pwc_set_motor(pdev); break; default: ret = -EINVAL; } if (ret) PWC_ERROR("s_ctrl %s error %d\n", ctrl->name, ret); return ret; } static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f) { struct pwc_device *pdev = video_drvdata(file); /* We only support two formats: the raw format, and YUV */ switch (f->index) { case 0: /* RAW format */ f->pixelformat = pdev->type <= 646 ?
V4L2_PIX_FMT_PWC1 : V4L2_PIX_FMT_PWC2; break; case 1: f->pixelformat = V4L2_PIX_FMT_YUV420; break; default: return -EINVAL; } return 0; } static int pwc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct pwc_device *pdev = video_drvdata(file); if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; PWC_DEBUG_IOCTL("ioctl(VIDIOC_G_FMT) return size %dx%d\n", pdev->width, pdev->height); pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt); return 0; } static int pwc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct pwc_device *pdev = video_drvdata(file); return pwc_vidioc_try_fmt(pdev, f); } static int pwc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct pwc_device *pdev = video_drvdata(file); unsigned int i = 0, index = fsize->index; if (fsize->pixel_format == V4L2_PIX_FMT_YUV420 || (fsize->pixel_format == V4L2_PIX_FMT_PWC1 && DEVICE_USE_CODEC1(pdev->type)) || (fsize->pixel_format == V4L2_PIX_FMT_PWC2 && DEVICE_USE_CODEC23(pdev->type))) { for (i = 0; i < PSZ_MAX; i++) { if (!(pdev->image_mask & (1UL << i))) continue; if (!index--) { fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = pwc_image_sizes[i][0]; fsize->discrete.height = pwc_image_sizes[i][1]; return 0; } } } return -EINVAL; } static int pwc_enum_frameintervals(struct file *file, void *fh, struct v4l2_frmivalenum *fival) { struct pwc_device *pdev = video_drvdata(file); int size = -1; unsigned int i; for (i = 0; i < PSZ_MAX; i++) { if (pwc_image_sizes[i][0] == fival->width && pwc_image_sizes[i][1] == fival->height) { size = i; break; } } /* TODO: Support raw format */ if (size < 0 || fival->pixel_format != V4L2_PIX_FMT_YUV420) return -EINVAL; i = pwc_get_fps(pdev, fival->index, size); if (!i) return -EINVAL; fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete.numerator = 1; fival->discrete.denominator = i; return 0; } static int pwc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm) { struct pwc_device *pdev = video_drvdata(file); if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; memset(parm, 0, sizeof(*parm)); parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; parm->parm.capture.readbuffers = MIN_FRAMES; parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME; parm->parm.capture.timeperframe.denominator = pdev->vframes; parm->parm.capture.timeperframe.numerator = 1; return 0; } static int pwc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm) { struct pwc_device *pdev = video_drvdata(file); int compression = 0; int ret, fps; if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; /* If timeperframe == 0, then reset the framerate to the nominal value. We pick a high framerate here, and let pwc_set_video_mode() figure out the best match. 
*/ if (parm->parm.capture.timeperframe.numerator == 0 || parm->parm.capture.timeperframe.denominator == 0) fps = 30; else fps = parm->parm.capture.timeperframe.denominator / parm->parm.capture.timeperframe.numerator; if (vb2_is_busy(&pdev->vb_queue)) return -EBUSY; ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt, fps, &compression, 0); pwc_g_parm(file, fh, parm); return ret; } const struct v4l2_ioctl_ops pwc_ioctl_ops = { .vidioc_querycap = pwc_querycap, .vidioc_enum_input = pwc_enum_input, .vidioc_g_input = pwc_g_input, .vidioc_s_input = pwc_s_input, .vidioc_enum_fmt_vid_cap = pwc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_enum_framesizes = pwc_enum_framesizes, .vidioc_enum_frameintervals = pwc_enum_frameintervals, .vidioc_g_parm = pwc_g_parm, .vidioc_s_parm = pwc_s_parm, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, };
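pwc_s_parm() derives the requested frame rate as timeperframe.denominator / timeperframe.numerator and falls back to 30 fps when either field is zero. A minimal user-space sketch of driving that path through the standard VIDIOC_S_PARM ioctl (the device path and helper name are hypothetical):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical helper: request a capture rate of `fps` frames per second. */
static int set_capture_fps(int fd, unsigned int fps)
{
	struct v4l2_streamparm parm;

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	parm.parm.capture.timeperframe.numerator = 1;	/* fps = denom / num */
	parm.parm.capture.timeperframe.denominator = fps;
	return ioctl(fd, VIDIOC_S_PARM, &parm);
}

/* Usage: int fd = open("/dev/video0", O_RDWR); set_capture_fps(fd, 15); */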
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHMEM_H #define _LINUX_HIGHMEM_H #include <linux/fs.h> #include <linux/kernel.h> #include <linux/bug.h> #include <linux/cacheflush.h> #include <linux/kmsan.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include "highmem-internal.h" /** * kmap - Map a page for long term usage * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * Can only be invoked from preemptible task context because on 32bit * systems with CONFIG_HIGHMEM enabled this function might sleep. * * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area * this returns the virtual address of the direct kernel mapping. * * The returned virtual address is globally visible and valid up to the * point where it is unmapped via kunmap(). The pointer can be handed to * other contexts.
* * For highmem pages on 32bit systems this can be slow as the mapping space * is limited and protected by a global lock. In case that there is no * mapping slot available the function blocks until a slot is released via * kunmap(). */ static inline void *kmap(struct page *page); /** * kunmap - Unmap the virtual address mapped by kmap() * @page: Pointer to the page which was mapped by kmap() * * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of * pages in the low memory area. */ static inline void kunmap(struct page *page); /** * kmap_to_page - Get the page for a kmap'ed address * @addr: The address to look up * * Returns: The page which is mapped to @addr. */ static inline struct page *kmap_to_page(void *addr); /** * kmap_flush_unused - Flush all unused kmap mappings in order to * remove stray mappings */ static inline void kmap_flush_unused(void); /** * kmap_local_page - Map a page for temporary usage * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * Can be invoked from any context, including interrupts. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of * the map operation: * * addr1 = kmap_local_page(page1); * addr2 = kmap_local_page(page2); * ... * kunmap_local(addr2); * kunmap_local(addr1); * * Unmapping addr1 before addr2 is invalid and causes malfunction. * * Contrary to kmap() mappings the mapping is only valid in the context of * the caller and cannot be handed to other contexts. * * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the * virtual address of the direct mapping. Only real highmem pages are * temporarily mapped. * * While kmap_local_page() is significantly faster than kmap() for the highmem * case it comes with restrictions about the pointer validity. * * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across * preemption. No caller of kmap_local_page() can rely on this side effect. */ static inline void *kmap_local_page(struct page *page); /** * kmap_local_folio - Map a page in this folio for temporary usage * @folio: The folio containing the page. * @offset: The byte offset within the folio which identifies the page. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of * the map operation:: * * addr1 = kmap_local_folio(folio1, offset1); * addr2 = kmap_local_folio(folio2, offset2); * ... * kunmap_local(addr2); * kunmap_local(addr1); * * Unmapping addr1 before addr2 is invalid and causes malfunction. * * Contrary to kmap() mappings the mapping is only valid in the context of * the caller and cannot be handed to other contexts. * * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the * virtual address of the direct mapping. Only real highmem pages are * temporarily mapped. * * While it is significantly faster than kmap() for the highmem case it * comes with restrictions about the pointer validity. * * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across * preemption. No caller of kmap_local_folio() can rely on this side effect. * * Context: Can be invoked from any context. * Return: The virtual address of @offset. 
*/ static inline void *kmap_local_folio(struct folio *folio, size_t offset); /** * kmap_atomic - Atomically map a page for temporary usage - Deprecated! * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * In fact a wrapper around kmap_local_page() which also disables pagefaults * and, depending on PREEMPT_RT configuration, also CPU migration and * preemption. Therefore users should not count on the latter two side effects. * * Mappings should always be released by kunmap_atomic(). * * Do not use in new code. Use kmap_local_page() instead. * * It is used in atomic context when code wants to access the contents of a * page that might be allocated from high memory (see __GFP_HIGHMEM), for * example a page in the pagecache. The API has two functions, and they * can be used in a manner similar to the following:: * * // Find the page of interest. * struct page *page = find_get_page(mapping, offset); * * // Gain access to the contents of that page. * void *vaddr = kmap_atomic(page); * * // Do something to the contents of that page. * memset(vaddr, 0, PAGE_SIZE); * * // Unmap that page. * kunmap_atomic(vaddr); * * Note that the kunmap_atomic() call takes the result of the kmap_atomic() * call, not the argument. * * If you need to map two pages because you want to copy from one page to * another you need to keep the kmap_atomic calls strictly nested, like: * * vaddr1 = kmap_atomic(page1); * vaddr2 = kmap_atomic(page2); * * memcpy(vaddr1, vaddr2, PAGE_SIZE); * * kunmap_atomic(vaddr2); * kunmap_atomic(vaddr1); */ static inline void *kmap_atomic(struct page *page); /* Highmem related interfaces for management code */ static inline unsigned long nr_free_highpages(void); static inline unsigned long totalhigh_pages(void); #ifndef ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { } #endif #ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE static inline void flush_kernel_vmap_range(void *vaddr, int size) { } static inline void invalidate_kernel_vmap_range(void *vaddr, int size) { } #endif /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_local_page(page); clear_user_page(addr, vaddr, page); kunmap_local(addr); } #endif #ifndef vma_alloc_zeroed_movable_folio /** * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA. * @vma: The VMA the page is to be allocated for. * @vaddr: The virtual address the page will be inserted into. * * This function will allocate a page suitable for inserting into this * VMA at this virtual address. It may be allocated from highmem or * the movable zone. An architecture may provide its own implementation. * * Return: A folio containing one allocated and zeroed page or NULL if * we are out of memory. 
*/ static inline struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, unsigned long vaddr) { struct folio *folio; folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); if (folio && user_alloc_needs_zeroing()) clear_user_highpage(&folio->page, vaddr); return folio; } #endif static inline void clear_highpage(struct page *page) { void *kaddr = kmap_local_page(page); clear_page(kaddr); kunmap_local(kaddr); } static inline void clear_highpage_kasan_tagged(struct page *page) { void *kaddr = kmap_local_page(page); clear_page(kasan_reset_tag(kaddr)); kunmap_local(kaddr); } #ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE static inline void tag_clear_highpage(struct page *page) { } #endif /* * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. */ #ifdef CONFIG_HIGHMEM void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2); #else static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { void *kaddr = kmap_local_page(page); unsigned int i; BUG_ON(end1 > page_size(page) || end2 > page_size(page)); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); kunmap_local(kaddr); for (i = 0; i < compound_nr(page); i++) flush_dcache_page(page + i); } #endif static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) { zero_user_segments(page, start, end, 0, 0); } static inline void zero_user(struct page *page, unsigned start, unsigned size) { zero_user_segments(page, start, start + size, 0, 0); } #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); copy_user_page(vto, vfrom, vaddr, to); kmsan_unpoison_memory(page_address(to), PAGE_SIZE); kunmap_local(vto); kunmap_local(vfrom); } #endif #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); copy_page(vto, vfrom); kmsan_copy_page_meta(to, from); kunmap_local(vto); kunmap_local(vfrom); } #endif #ifdef copy_mc_to_kernel /* * If architecture supports machine check exception handling, define the * #MC versions of copy_user_highpage and copy_highpage. They copy a memory * page with #MC in source page (@from) handled, and return the number * of bytes not copied if there was a #MC, otherwise 0 for success. 
*/ static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned long ret; char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); if (!ret) kmsan_unpoison_memory(page_address(to), PAGE_SIZE); kunmap_local(vto); kunmap_local(vfrom); if (ret) memory_failure_queue(page_to_pfn(from), 0); return ret; } static inline int copy_mc_highpage(struct page *to, struct page *from) { unsigned long ret; char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); if (!ret) kmsan_copy_page_meta(to, from); kunmap_local(vto); kunmap_local(vfrom); if (ret) memory_failure_queue(page_to_pfn(from), 0); return ret; } #else static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, size_t len) { char *dst = kmap_local_page(dst_page); char *src = kmap_local_page(src_page); VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); memcpy(dst + dst_off, src + src_off, len); kunmap_local(src); kunmap_local(dst); } static inline void memset_page(struct page *page, size_t offset, int val, size_t len) { char *addr = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memset(addr + offset, val, len); kunmap_local(addr); } static inline void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) { char *from = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to, from + offset, len); kunmap_local(from); } static inline void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) { char *to = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); flush_dcache_page(page); kunmap_local(to); } static inline void memzero_page(struct page *page, size_t offset, size_t len) { char *addr = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memset(addr + offset, 0, len); flush_dcache_page(page); kunmap_local(addr); } /** * memcpy_from_folio - Copy a range of bytes from a folio. * @to: The memory to copy to. * @folio: The folio to read from. * @offset: The first byte in the folio to read. * @len: The number of bytes to copy. */ static inline void memcpy_from_folio(char *to, struct folio *folio, size_t offset, size_t len) { VM_BUG_ON(offset + len > folio_size(folio)); do { const char *from = kmap_local_folio(folio, offset); size_t chunk = len; if (folio_test_highmem(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); kunmap_local(from); to += chunk; offset += chunk; len -= chunk; } while (len > 0); } /** * memcpy_to_folio - Copy a range of bytes to a folio. * @folio: The folio to write to. * @offset: The first byte in the folio to store to. * @from: The memory to copy from. * @len: The number of bytes to copy. 
*/ static inline void memcpy_to_folio(struct folio *folio, size_t offset, const char *from, size_t len) { VM_BUG_ON(offset + len > folio_size(folio)); do { char *to = kmap_local_folio(folio, offset); size_t chunk = len; if (folio_test_highmem(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); kunmap_local(to); from += chunk; offset += chunk; len -= chunk; } while (len > 0); flush_dcache_folio(folio); } /** * folio_zero_tail - Zero the tail of a folio. * @folio: The folio to zero. * @offset: The byte offset in the folio to start zeroing at. * @kaddr: The address the folio is currently mapped to. * * If you have already used kmap_local_folio() to map a folio, written * some data to it and now need to zero the end of the folio (and flush * the dcache), you can use this function. If you do not have the * folio kmapped (eg the folio has been partially populated by DMA), * use folio_zero_range() or folio_zero_segment() instead. * * Return: An address which can be passed to kunmap_local(). */ static inline __must_check void *folio_zero_tail(struct folio *folio, size_t offset, void *kaddr) { size_t len = folio_size(folio) - offset; if (folio_test_highmem(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { memset(kaddr, 0, max); kunmap_local(kaddr); len -= max; offset += max; max = PAGE_SIZE; kaddr = kmap_local_folio(folio, offset); } } memset(kaddr, 0, len); flush_dcache_folio(folio); return kaddr; } /** * folio_fill_tail - Copy some data to a folio and pad with zeroes. * @folio: The destination folio. * @offset: The offset into @folio at which to start copying. * @from: The data to copy. * @len: How many bytes of data to copy. * * This function is most useful for filesystems which support inline data. * When they want to copy data from the inode into the page cache, this * function does everything for them. It supports large folios even on * HIGHMEM configurations. */ static inline void folio_fill_tail(struct folio *folio, size_t offset, const char *from, size_t len) { char *to = kmap_local_folio(folio, offset); VM_BUG_ON(offset + len > folio_size(folio)); if (folio_test_highmem(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { memcpy(to, from, max); kunmap_local(to); len -= max; from += max; offset += max; max = PAGE_SIZE; to = kmap_local_folio(folio, offset); } } memcpy(to, from, len); to = folio_zero_tail(folio, offset + len, to + len); kunmap_local(to); } /** * memcpy_from_file_folio - Copy some bytes from a file folio. * @to: The destination buffer. * @folio: The folio to copy from. * @pos: The position in the file. * @len: The maximum number of bytes to copy. * * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE * if the folio comes from HIGHMEM, and by the size of the folio. * * Return: The number of bytes copied from the folio. */ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio, loff_t pos, size_t len) { size_t offset = offset_in_folio(folio, pos); char *from = kmap_local_folio(folio, offset); if (folio_test_highmem(folio)) { offset = offset_in_page(offset); len = min_t(size_t, len, PAGE_SIZE - offset); } else len = min(len, folio_size(folio) - offset); memcpy(to, from, len); kunmap_local(from); return len; } /** * folio_zero_segments() - Zero two byte ranges in a folio. * @folio: The folio to write to. * @start1: The first byte to zero. * @xend1: One more than the last byte in the first range. 
* @start2: The first byte to zero in the second range. * @xend2: One more than the last byte in the second range. */ static inline void folio_zero_segments(struct folio *folio, size_t start1, size_t xend1, size_t start2, size_t xend2) { zero_user_segments(&folio->page, start1, xend1, start2, xend2); } /** * folio_zero_segment() - Zero a byte range in a folio. * @folio: The folio to write to. * @start: The first byte to zero. * @xend: One more than the last byte to zero. */ static inline void folio_zero_segment(struct folio *folio, size_t start, size_t xend) { zero_user_segments(&folio->page, start, xend, 0, 0); } /** * folio_zero_range() - Zero a byte range in a folio. * @folio: The folio to write to. * @start: The first byte to zero. * @length: The number of bytes to zero. */ static inline void folio_zero_range(struct folio *folio, size_t start, size_t length) { zero_user_segments(&folio->page, start, start + length, 0, 0); } /** * folio_release_kmap - Unmap a folio and drop a refcount. * @folio: The folio to release. * @addr: The address previously returned by a call to kmap_local_folio(). * * It is common, eg in directory handling to kmap a folio. This function * unmaps the folio and drops the refcount that was being held to keep the * folio alive while we accessed it. */ static inline void folio_release_kmap(struct folio *folio, void *addr) { kunmap_local(addr); folio_put(folio); } static inline void unmap_and_put_page(struct page *page, void *addr) { folio_release_kmap(page_folio(page), addr); } #endif /* _LINUX_HIGHMEM_H */
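As a small usage sketch of the local-mapping API documented above (a hypothetical helper, essentially what copy_highpage() already does): map both pages, copy, then unmap in the reverse order of mapping, as the stack-based kmap_local_*() scheme requires.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: duplicate the contents of @src into @dst using the
 * stack-based kmap_local_page() API; note the reverse-order unmaps. */
static void example_copy_page(struct page *dst, struct page *src)
{
	void *vdst = kmap_local_page(dst);
	void *vsrc = kmap_local_page(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_local(vsrc);	/* last mapped, first unmapped */
	kunmap_local(vdst);
}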
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2023 Isovalent */ #ifndef __NET_TCX_H #define __NET_TCX_H #include <linux/bpf.h> #include <linux/bpf_mprog.h> #include <net/sch_generic.h> struct mini_Qdisc; struct tcx_entry { struct mini_Qdisc __rcu *miniq; struct bpf_mprog_bundle bundle; u32 miniq_active; struct rcu_head rcu; }; struct tcx_link { struct bpf_link link; struct net_device *dev; u32 location; }; static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress) { #ifdef CONFIG_NET_XGRESS skb->tc_at_ingress = ingress; #endif } #ifdef CONFIG_NET_XGRESS static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry) { struct bpf_mprog_bundle *bundle = entry->parent; return container_of(bundle, struct tcx_entry, bundle); } static inline struct tcx_link *tcx_link(const struct bpf_link *link) { return container_of(link, struct tcx_link, link); } void tcx_inc(void); void tcx_dec(void); static inline void tcx_entry_sync(void) { /* bpf_mprog_entry got a/b swapped, therefore ensure that * there are no inflight users on the old one anymore. */ synchronize_rcu(); } static inline void tcx_entry_update(struct net_device *dev, struct bpf_mprog_entry *entry, bool ingress) { ASSERT_RTNL(); if (ingress) rcu_assign_pointer(dev->tcx_ingress, entry); else rcu_assign_pointer(dev->tcx_egress, entry); } static inline struct bpf_mprog_entry * tcx_entry_fetch(struct net_device *dev, bool ingress) { ASSERT_RTNL(); if (ingress) return rcu_dereference_rtnl(dev->tcx_ingress); else return rcu_dereference_rtnl(dev->tcx_egress); } static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void) { struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL); if (tcx) { bpf_mprog_bundle_init(&tcx->bundle); return &tcx->bundle.a; } return NULL; } #define tcx_entry_create(...)
alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__)) static inline void tcx_entry_free(struct bpf_mprog_entry *entry) { kfree_rcu(tcx_entry(entry), rcu); } static inline struct bpf_mprog_entry * tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created) { struct bpf_mprog_entry *entry = tcx_entry_fetch(dev, ingress); *created = false; if (!entry) { entry = tcx_entry_create(); if (!entry) return NULL; *created = true; } return entry; } static inline void tcx_skeys_inc(bool ingress) { tcx_inc(); if (ingress) net_inc_ingress_queue(); else net_inc_egress_queue(); } static inline void tcx_skeys_dec(bool ingress) { if (ingress) net_dec_ingress_queue(); else net_dec_egress_queue(); tcx_dec(); } static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry) { ASSERT_RTNL(); tcx_entry(entry)->miniq_active++; } static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry) { ASSERT_RTNL(); tcx_entry(entry)->miniq_active--; } static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry) { ASSERT_RTNL(); return bpf_mprog_total(entry) || tcx_entry(entry)->miniq_active; } static inline enum tcx_action_base tcx_action_code(struct sk_buff *skb, int code) { switch (code) { case TCX_PASS: skb->tc_index = qdisc_skb_cb(skb)->tc_classid; fallthrough; case TCX_DROP: case TCX_REDIRECT: return code; case TCX_NEXT: default: return TCX_NEXT; } } #endif /* CONFIG_NET_XGRESS */ #if defined(CONFIG_NET_XGRESS) && defined(CONFIG_BPF_SYSCALL) int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); void tcx_uninstall(struct net_device *dev, bool ingress); int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); static inline void dev_tcx_uninstall(struct net_device *dev) { ASSERT_RTNL(); tcx_uninstall(dev, true); tcx_uninstall(dev, false); } #else static inline int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EINVAL; } static inline int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EINVAL; } static inline int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EINVAL; } static inline int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { return -EINVAL; } static inline void dev_tcx_uninstall(struct net_device *dev) { } #endif /* CONFIG_NET_XGRESS && CONFIG_BPF_SYSCALL */ #endif /* __NET_TCX_H */
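The helpers above are meant to be combined under the RTNL lock: fetch or create a bpf_mprog entry for the direction, attach a program via the bpf_mprog API, publish the entry with tcx_entry_update(), and synchronize before assuming old readers are gone. A hedged sketch of that pattern (the real attach logic lives in kernel/bpf/tcx.c; the function name and the elided attach step below are illustrative only):

/* Illustrative only: shows how the inline helpers in this header compose. */
static int example_tcx_install(struct net_device *dev, bool ingress)
{
	struct bpf_mprog_entry *entry;
	bool created;
	int ret = 0;

	ASSERT_RTNL();
	entry = tcx_entry_fetch_or_create(dev, ingress, &created);
	if (!entry)
		return -ENOMEM;
	/* ... attach a program to @entry via the bpf_mprog API, set ret ... */
	if (ret) {
		if (created)
			tcx_entry_free(entry);
		return ret;
	}
	tcx_entry_update(dev, entry, ingress);
	tcx_entry_sync();
	tcx_skeys_inc(ingress);
	return 0;
}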
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig * Copyright (C) 2006 Nick Piggin * Copyright (C) 2012 Konstantin Khlebnikov */ #ifndef _LINUX_RADIX_TREE_H #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> #include <linux/gfp_types.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/math.h> #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/xarray.h> #include <linux/local_lock.h> /* Keep unconverted code working */ #define radix_tree_root xarray #define radix_tree_node xa_node struct radix_tree_preload { local_lock_t lock; unsigned nr; /* nodes->parent points to next preallocated node */ struct radix_tree_node *nodes; }; DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: * * 00 - data pointer * 10 - internal entry * x1 - value entry * * The internal entry may be a pointer to the next level in the tree, a * sibling entry, or an indicator that the entry in this slot has been moved * to another location in the tree and the lookup should be restarted. While * NULL fits the 'data pointer' pattern, it means that there is no entry in * the tree for this index (no matter what level of the tree it is found at). * This means that storing a NULL entry in the tree is the same as deleting * the entry from the tree.
*/ #define RADIX_TREE_ENTRY_MASK 3UL #define RADIX_TREE_INTERNAL_NODE 2UL static inline bool radix_tree_is_internal_node(void *ptr) { return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == RADIX_TREE_INTERNAL_NODE; } /*** radix-tree API starts here ***/ #define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) #define RADIX_TREE_MAX_TAGS XA_MAX_MARKS #define RADIX_TREE_TAG_LONGS XA_MARK_LONGS #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) /* The IDR tag is stored in the low bits of xa_flags */ #define ROOT_IS_IDR ((__force gfp_t)4) /* The top bits of xa_flags are used to store the root tags */ #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) #define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask) #define RADIX_TREE(name, mask) \ struct radix_tree_root name = RADIX_TREE_INIT(name, mask) #define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask) static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->xa_head == NULL; } /** * struct radix_tree_iter - radix tree iterator state * * @index: index of current slot * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating * @node: node that contains current slot * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. It is * described by a pointer to its first slot and a struct radix_tree_iter * which holds the chunk's position in the tree and its size. For tagged * iteration radix_tree_iter also holds the slots' bit-mask for one chosen * radix tree tag. */ struct radix_tree_iter { unsigned long index; unsigned long next_index; unsigned long tags; struct radix_tree_node *node; }; /** * Radix-tree synchronization * * The radix-tree API requires that users provide all synchronisation (with * specific exceptions, noted below). * * Synchronization of access to the data items being stored in the tree, and * management of their lifetimes must be completely managed by API users. * * For API usage, in general, * - any function _modifying_ the tree or tags (inserting or deleting * items, setting or clearing tags) must exclude other modifications, and * exclude any functions reading the tree. * - any function _reading_ the tree or tags (looking up items or tags, * gang lookups) must exclude modifications to the tree, but may occur * concurrently with other readers. * * The notable exceptions to this rule are the following functions: * __radix_tree_lookup * radix_tree_lookup * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup * radix_tree_gang_lookup_tag * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. * * It is still required that the caller manage the synchronization and lifetimes * of the items. So if RCU lock-free lookups are used, typically this would mean * that the items have their own locks, or are amenable to lock-free access; and * that the items are freed by RCU (or only freed after having been deleted from * the radix tree *and* a synchronize_rcu() grace period). 
* * (Note, rcu_assign_pointer and rcu_dereference are not needed to control * access to data items when inserting into or looking up from the radix tree) * * Note that the value returned by radix_tree_tag_get() may not be relied upon * if only the RCU read lock is held. Functions to set/clear tags and to * delete nodes running concurrently with it may affect its result such that * two consecutive reads in the same locked section may return different * values. If reliability is required, modification functions must also be * excluded from concurrency. * * radix_tree_tagged is able to be called without locking or RCU. */ /** * radix_tree_deref_slot - dereference a slot * @slot: slot pointer, returned by radix_tree_lookup_slot * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is * held (ie. items cannot be concurrently inserted). * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. * * Return: entry stored in that slot. */ static inline void *radix_tree_deref_slot(void __rcu **slot) { return rcu_dereference(*slot); } /** * radix_tree_deref_slot_protected - dereference a slot with tree lock held * @slot: slot pointer, returned by radix_tree_lookup_slot * * Similar to radix_tree_deref_slot. The caller does not hold the RCU read * lock but it must hold the tree lock to prevent parallel updates. * * Return: entry stored in that slot. */ static inline void *radix_tree_deref_slot_protected(void __rcu **slot, spinlock_t *treelock) { return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); } /** * radix_tree_deref_retry - check radix_tree_deref_slot * @arg: pointer returned by radix_tree_deref_slot * Returns: 0 if retry is not required, otherwise retry is required * * radix_tree_deref_retry must be used with radix_tree_deref_slot. */ static inline int radix_tree_deref_retry(void *arg) { return unlikely(radix_tree_is_internal_node(arg)); } /** * radix_tree_exception - radix_tree_deref_slot returned either exception? * @arg: value returned by radix_tree_deref_slot * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. 
*/ static inline int radix_tree_exception(void *arg) { return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } int radix_tree_insert(struct radix_tree_root *, unsigned long index, void *); void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp); void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, unsigned long index); void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, void __rcu **slot, void *entry); void radix_tree_iter_replace(struct radix_tree_root *, const struct radix_tree_iter *, void __rcu **slot, void *entry); void radix_tree_replace_slot(struct radix_tree_root *, void __rcu **slot, void *entry); void radix_tree_iter_delete(struct radix_tree_root *, struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, void __rcu ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { local_unlock(&radix_tree_preloads.lock); } void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max); enum { RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ }; /** * radix_tree_iter_init - initialize radix tree iterator * * @iter: pointer to iterator state * @start: iteration starting index * Returns: NULL */ static __always_inline void __rcu ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it * in the case of a successful tagged chunk lookup. If the lookup was * unsuccessful or non-tagged then nobody cares about ->tags. * * Set index to zero to bypass next_index overflow protection. * See the comment in radix_tree_next_chunk() for details. */ iter->index = 0; iter->next_index = start; return NULL; } /** * radix_tree_next_chunk - find next chunk of slots for iteration * * @root: radix tree root * @iter: iterator state * @flags: RADIX_TREE_ITER_* flags and tag index * Returns: pointer to chunk first slot, or NULL if there no more left * * This function looks up the next chunk in the radix tree starting from * @iter->next_index. It returns a pointer to the chunk's first slot. 
* Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). */ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** * radix_tree_iter_lookup - look up an index in the radix tree * @root: radix tree root * @iter: iterator state * @index: key to look up * * If @index is present in the radix tree, this function returns the slot * containing it and updates @iter to describe the entry. If @index is not * present, it returns NULL. */ static inline void __rcu ** radix_tree_iter_lookup(const struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned long index) { radix_tree_iter_init(iter, index); return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); } /** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * * If we iterate over a tree protected only by the RCU lock, a race * against deletion or creation may result in seeing a slot for which * radix_tree_deref_retry() returns true. If so, call this function * and continue the iteration. */ static inline __must_check void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; return NULL; } static inline unsigned long __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) { return iter->index + slots; } /** * radix_tree_iter_resume - resume iterating when the chunk may be invalid * @slot: pointer to current slot * @iter: iterator state * Returns: New slot pointer * * If the iterator needs to release then reacquire a lock, the chunk may * have been invalidated by an insertion or deletion. Call this function * before releasing the lock to continue the iteration from the next index. */ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter); /** * radix_tree_chunk_size - get current chunk size * * @iter: pointer to radix tree iterator * Returns: current chunk size */ static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { return iter->next_index - iter->index; } /** * radix_tree_next_slot - find next slot in chunk * * @slot: pointer to current slot * @iter: pointer to iterator state * @flags: RADIX_TREE_ITER_*, should be constant * Returns: pointer to next slot, or NULL if there no more left * * This function updates @iter->index in the case of a successful lookup. * For tagged lookup it also eats @iter->tags. * * There are several cases where 'slot' can be passed in as NULL to this * function. These cases result from the use of radix_tree_iter_resume() or * radix_tree_iter_retry(). In these cases we don't end up dereferencing * 'slot' because either: * a) we are doing tagged iteration and iter->tags has been set to 0, or * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
*/ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { iter->tags >>= 1; if (unlikely(!iter->tags)) return NULL; if (likely(iter->tags & 1ul)) { iter->index = __radix_tree_iter_add(iter, 1); slot++; goto found; } if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); iter->tags >>= offset++; iter->index = __radix_tree_iter_add(iter, offset); slot += offset; goto found; } } else { long count = radix_tree_chunk_size(iter); while (--count > 0) { slot++; iter->index = __radix_tree_iter_add(iter, 1); if (likely(*slot)) goto found; if (flags & RADIX_TREE_ITER_CONTIG) { /* forbid switching to the next chunk */ iter->next_index = 0; break; } } } return NULL; found: return slot; } /** * radix_tree_for_each_slot - iterate over non-empty slots * * @slot: the void** variable for pointer to slot * @root: the struct radix_tree_root pointer * @iter: the struct radix_tree_iter pointer * @start: iteration starting index * * @slot points to radix tree slot, @iter->index contains its index. */ #define radix_tree_for_each_slot(slot, root, iter, start) \ for (slot = radix_tree_iter_init(iter, start) ; \ slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ slot = radix_tree_next_slot(slot, iter, 0)) /** * radix_tree_for_each_tagged - iterate over tagged slots * * @slot: the void** variable for pointer to slot * @root: the struct radix_tree_root pointer * @iter: the struct radix_tree_iter pointer * @start: iteration starting index * @tag: tag index * * @slot points to radix tree slot, @iter->index contains its index. */ #define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ for (slot = radix_tree_iter_init(iter, start) ; \ slot || (slot = radix_tree_next_chunk(root, iter, \ RADIX_TREE_ITER_TAGGED | tag)) ; \ slot = radix_tree_next_slot(slot, iter, \ RADIX_TREE_ITER_TAGGED | tag)) #endif /* _LINUX_RADIX_TREE_H */
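The preload/insert and RCU-lookup rules documented in this header can be summarised with a short usage sketch. This is illustrative only and not part of the header; the tree, lock, and function names below are hypothetical.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);	/* declared and initialised empty */
static DEFINE_SPINLOCK(example_lock);		/* serialises modifications */

/* Writer side: preallocate nodes, then insert under the modification lock. */
static int example_store(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep; reserves nodes */
	if (err)
		return err;

	spin_lock(&example_lock);
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);

	radix_tree_preload_end();	/* drops the per-CPU preload lock taken above */
	return err;
}

/* Reader side: lockless lookup, but only inside an RCU read-side section. */
static void *example_find(unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&example_tree, index);
	rcu_read_unlock();

	return item;
}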
/* * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices * * Author : Liu Junliang <liujunliang_ljl@163.com> * * Based on dm9601.c * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied.
*/ #include <linux/module.h> #include <linux/sched.h> #include <linux/stddef.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> #include "sr9700.h" static int sr_read(struct usbnet *dev, u8 reg, u16 length, void *data) { int err; err = usbnet_read_cmd(dev, SR_RD_REGS, SR_REQ_RD_REG, 0, reg, data, length); if ((err != length) && (err >= 0)) err = -EINVAL; return err; } static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data) { int err; err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data, length); if ((err >= 0) && (err < length)) err = -EINVAL; return err; } static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value) { return sr_read(dev, reg, 1, value); } static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value) { return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, value, reg, NULL, 0); } static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, const void *data) { usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data, length); } static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value) { usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, value, reg, NULL, 0); } static int wait_phy_eeprom_ready(struct usbnet *dev, int phy) { int i; for (i = 0; i < SR_SHARE_TIMEOUT; i++) { u8 tmp = 0; int ret; udelay(1); ret = sr_read_reg(dev, SR_EPCR, &tmp); if (ret < 0) return ret; /* ready */ if (!(tmp & EPCR_ERRE)) return 0; } netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom"); return -EIO; } static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg, __le16 *value) { int ret; mutex_lock(&dev->phy_mutex); sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; sr_write_reg(dev, SR_EPCR, 0x0); ret = sr_read(dev, SR_EPDR, 2, value); netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", phy, reg, *value, ret); out_unlock: mutex_unlock(&dev->phy_mutex); return ret; } static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg, __le16 value) { int ret; mutex_lock(&dev->phy_mutex); ret = sr_write(dev, SR_EPDR, 2, &value); if (ret < 0) goto out_unlock; sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); sr_write_reg(dev, SR_EPCR, phy ? 
(EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : (EPCR_WEP | EPCR_ERPRW)); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; sr_write_reg(dev, SR_EPCR, 0x0); out_unlock: mutex_unlock(&dev->phy_mutex); return ret; } static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value) { return sr_share_read_word(dev, 0, offset, value); } static int sr9700_get_eeprom_len(struct net_device *netdev) { return SR_EEPROM_LEN; } static int sr9700_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *data) { struct usbnet *dev = netdev_priv(netdev); __le16 *buf = (__le16 *)data; int ret = 0; int i; /* access is 16bit */ if ((eeprom->offset & 0x01) || (eeprom->len & 0x01)) return -EINVAL; for (i = 0; i < eeprom->len / 2; i++) { ret = sr_read_eeprom_word(dev, eeprom->offset / 2 + i, buf + i); if (ret < 0) break; } return ret; } static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); int err, res; __le16 word; int rc = 0; if (phy_id) { netdev_dbg(netdev, "Only internal phy supported\n"); return 0; } /* Access NSR_LINKST bit for link status instead of MII_BMSR */ if (loc == MII_BMSR) { u8 value; err = sr_read_reg(dev, SR_NSR, &value); if (err < 0) return err; if (value & NSR_LINKST) rc = 1; } err = sr_share_read_word(dev, 1, loc, &word); if (err < 0) return err; if (rc == 1) res = le16_to_cpu(word) | BMSR_LSTATUS; else res = le16_to_cpu(word) & ~BMSR_LSTATUS; netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", phy_id, loc, res); return res; } static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc, int val) { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); if (phy_id) { netdev_dbg(netdev, "Only internal phy supported\n"); return; } netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", phy_id, loc, val); sr_share_write_word(dev, 1, loc, res); } static u32 sr9700_get_link(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); u8 value = 0; int rc = 0; /* Get the Link Status directly */ sr_read_reg(dev, SR_NSR, &value); if (value & NSR_LINKST) rc = 1; return rc; } static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(netdev); return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static const struct ethtool_ops sr9700_ethtool_ops = { .get_drvinfo = usbnet_get_drvinfo, .get_link = sr9700_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_eeprom_len = sr9700_get_eeprom_len, .get_eeprom = sr9700_get_eeprom, .nway_reset = usbnet_nway_reset, .get_link_ksettings = usbnet_get_link_ksettings_mii, .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static void sr9700_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); /* We use the 20 byte dev->data for our 8 byte filter buffer * to avoid allocating memory that is tricky to free later */ u8 *hashes = (u8 *)&dev->data; /* rx_ctl setting : enable, disable_long, disable_crc */ u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG; memset(hashes, 0x00, SR_MCAST_SIZE); /* broadcast address */ hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG; if (netdev->flags & IFF_PROMISC) { rx_ctl |= RCR_PRMSC; } else if (netdev->flags & IFF_ALLMULTI || netdev_mc_count(netdev) > SR_MCAST_MAX) { rx_ctl |= RCR_RUNT; } else if (!netdev_mc_empty(netdev)) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc(ETH_ALEN, 
ha->addr) >> 26; hashes[crc >> 3] |= 1 << (crc & 0x7); } } sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes); sr_write_reg_async(dev, SR_RCR, rx_ctl); } static int sr9700_set_mac_address(struct net_device *netdev, void *p) { struct usbnet *dev = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) { netdev_err(netdev, "not setting invalid mac address %pM\n", addr->sa_data); return -EINVAL; } eth_hw_addr_set(netdev, addr->sa_data); sr_write_async(dev, SR_PAR, 6, netdev->dev_addr); return 0; } static const struct net_device_ops sr9700_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = sr9700_ioctl, .ndo_set_rx_mode = sr9700_set_multicast, .ndo_set_mac_address = sr9700_set_mac_address, }; static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) { struct net_device *netdev; struct mii_if_info *mii; u8 addr[ETH_ALEN]; int ret; ret = usbnet_get_endpoints(dev, intf); if (ret) goto out; netdev = dev->net; netdev->netdev_ops = &sr9700_netdev_ops; netdev->ethtool_ops = &sr9700_ethtool_ops; netdev->hard_header_len += SR_TX_OVERHEAD; dev->hard_mtu = netdev->mtu + netdev->hard_header_len; /* bulkin buffer is preferably not less than 3K */ dev->rx_urb_size = 3072; mii = &dev->mii; mii->dev = netdev; mii->mdio_read = sr_mdio_read; mii->mdio_write = sr_mdio_write; mii->phy_id_mask = 0x1f; mii->reg_num_mask = 0x1f; sr_write_reg(dev, SR_NCR, NCR_RST); udelay(20); /* read MAC * After Chip Power on, the Chip will reload the MAC from * EEPROM automatically to PAR. In case there is no EEPROM externally, * a default MAC address is stored in PAR for making chip work properly. */ if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) { netdev_err(netdev, "Error reading MAC address\n"); ret = -ENODEV; goto out; } eth_hw_addr_set(netdev, addr); /* power up and reset phy */ sr_write_reg(dev, SR_PRR, PRR_PHY_RST); /* at least 10ms, here 20ms for safe */ msleep(20); sr_write_reg(dev, SR_PRR, 0); /* at least 1ms, here 2ms for reading right register */ udelay(2 * 1000); /* receive broadcast packets */ sr9700_set_multicast(netdev); sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET); sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); mii_nway_restart(mii); out: return ret; } static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct sk_buff *sr_skb; int len; /* skb content (packets) format : * p0 p1 p2 ...... pm * / \ * / \ * / \ * / \ * p0b0 p0b1 p0b2 p0b3 ...... 
p0b(n-4) p0b(n-3)...p0bn * * p0 : packet 0 * p0b0 : packet 0 byte 0 * * b0: rx status * b1: packet length (incl crc) low * b2: packet length (incl crc) high * b3..n-4: packet data * bn-3..bn: ethernet packet crc */ if (unlikely(skb->len < SR_RX_OVERHEAD)) { netdev_err(dev->net, "unexpected tiny rx frame\n"); return 0; } /* one skb may contains multiple packets */ while (skb->len > SR_RX_OVERHEAD) { if (skb->data[0] != 0x40) return 0; /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; if (len > ETH_FRAME_LEN || len > skb->len || len < 0) return 0; /* the last packet of current skb */ if (skb->len == (len + SR_RX_OVERHEAD)) { skb_pull(skb, 3); skb->len = len; skb_set_tail_pointer(skb, len); return 2; } sr_skb = netdev_alloc_skb_ip_align(dev->net, len); if (!sr_skb) return 0; skb_put(sr_skb, len); memcpy(sr_skb->data, skb->data + 3, len); usbnet_skb_return(dev, sr_skb); skb_pull(skb, len + SR_RX_OVERHEAD); } return 0; } static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int len; /* SR9700 can only send out one ethernet packet at once. * * b0 b1 b2 b3 ...... b(n-4) b(n-3)...bn * * b0: rx status * b1: packet length (incl crc) low * b2: packet length (incl crc) high * b3..n-4: packet data * bn-3..bn: ethernet packet crc */ len = skb->len; if (skb_cow_head(skb, SR_TX_OVERHEAD)) { dev_kfree_skb_any(skb); return NULL; } __skb_push(skb, SR_TX_OVERHEAD); /* usbnet adds padding if length is a multiple of packet size * if so, adjust length value in header */ if ((skb->len % dev->maxpacket) == 0) len++; skb->data[0] = len; skb->data[1] = len >> 8; return skb; } static void sr9700_status(struct usbnet *dev, struct urb *urb) { int link; u8 *buf; /* format: b0: net status b1: tx status 1 b2: tx status 2 b3: rx status b4: rx overflow b5: rx count b6: tx count b7: gpr */ if (urb->actual_length < 8) return; buf = urb->transfer_buffer; link = !!(buf[0] & 0x40); if (netif_carrier_ok(dev->net) != link) { usbnet_link_change(dev, link, 1); netdev_dbg(dev->net, "Link Status is: %d\n", link); } } static int sr9700_link_reset(struct usbnet *dev) { struct ethtool_cmd ecmd; mii_check_media(&dev->mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n", ecmd.speed, ecmd.duplex); return 0; } static const struct driver_info sr9700_driver_info = { .description = "CoreChip SR9700 USB Ethernet", .flags = FLAG_ETHER, .bind = sr9700_bind, .rx_fixup = sr9700_rx_fixup, .tx_fixup = sr9700_tx_fixup, .status = sr9700_status, .link_reset = sr9700_link_reset, .reset = sr9700_link_reset, }; static const struct usb_device_id products[] = { { USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */ .driver_info = (unsigned long)&sr9700_driver_info, }, {}, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver sr9700_usb_driver = { .name = "sr9700", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(sr9700_usb_driver); MODULE_AUTHOR("liujl <liujunliang_ljl@163.com>"); MODULE_DESCRIPTION("SR9700 one chip USB 1.1 USB to Ethernet device from http://www.corechip-sz.com/"); MODULE_LICENSE("GPL");
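For reference, the 3-byte receive header described in sr9700_rx_fixup() above can be decoded as in the following sketch. It is illustrative only; the helper name is hypothetical and not part of the driver.

#include <linux/types.h>

/* Decode one SR9700 rx header: a status byte followed by a little-endian
 * length that still includes the 4-byte ethernet CRC. Returns the payload
 * length, or -1 if the status byte does not indicate a good frame.
 */
static inline int sr9700_example_payload_len(const u8 *hdr)
{
	if (hdr[0] != 0x40)			/* 0x40 = frame received without error */
		return -1;

	return (hdr[1] | (hdr[2] << 8)) - 4;	/* strip the trailing CRC */
}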
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * VLAN An implementation of 802.1Q VLAN tagging.
* * Authors: Ben Greear <greearb@candelatech.com> */ #ifndef _LINUX_IF_VLAN_H_ #define _LINUX_IF_VLAN_H_ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/bug.h> #include <uapi/linux/if_vlan.h> #define VLAN_HLEN 4 /* The additional bytes required by VLAN * (in addition to the Ethernet header) */ #define VLAN_ETH_HLEN 18 /* Total octets in header. */ #define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */ /* * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan */ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ #define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID * @h_vlan_encapsulated_proto: packet type ID or len */ struct vlan_hdr { __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; /** * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) * @h_dest: destination ethernet address * @h_source: source ethernet address * @h_vlan_proto: ethernet protocol * @h_vlan_TCI: priority and VLAN ID * @h_vlan_encapsulated_proto: packet type ID or len */ struct vlan_ethhdr { struct_group(addrs, unsigned char h_dest[ETH_ALEN]; unsigned char h_source[ETH_ALEN]; ); __be16 h_vlan_proto; __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; #include <linux/skbuff.h> static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) { return (struct vlan_ethhdr *)skb_mac_header(skb); } /* Prefer this version in TX path, instead of * skb_reset_mac_header() + vlan_eth_hdr() */ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb) { return (struct vlan_ethhdr *)skb->data; } #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ #define VLAN_PRIO_SHIFT 13 #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */ #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ #define VLAN_N_VID 4096 /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); static inline bool is_vlan_dev(const struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN; } #define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all) #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK)) #define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) { ASSERT_RTNL(); return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); } static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev) { ASSERT_RTNL(); call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); } static inline int vlan_get_rx_stag_filter_info(struct net_device *dev) { ASSERT_RTNL(); return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); } static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) { ASSERT_RTNL(); call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); } /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats * @rx_packets: number of received packets * @rx_bytes: number of received bytes * @rx_multicast: number of received multicast packets * @tx_packets: number of transmitted packets * @tx_bytes: number of transmitted bytes * @syncp: synchronization point for 64bit 
counters * @rx_errors: number of rx errors * @tx_dropped: number of tx drops */ struct vlan_pcpu_stats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t rx_multicast; u64_stats_t tx_packets; u64_stats_t tx_bytes; struct u64_stats_sync syncp; u32 rx_errors; u32 tx_dropped; }; #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id); extern int vlan_for_each(struct net_device *dev, int (*action)(struct net_device *dev, int vid, void *arg), void *arg); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); /** * struct vlan_priority_tci_mapping - vlan egress priority mappings * @priority: skb priority * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000 * @next: pointer to next struct */ struct vlan_priority_tci_mapping { u32 priority; u16 vlan_qos; struct vlan_priority_tci_mapping *next; }; struct proc_dir_entry; struct netpoll; /** * struct vlan_dev_priv - VLAN private device data * @nr_ingress_mappings: number of ingress priority mappings * @ingress_priority_map: ingress priority mappings * @nr_egress_mappings: number of egress priority mappings * @egress_priority_map: hash of egress priority mappings * @vlan_proto: VLAN encapsulation protocol * @vlan_id: VLAN identifier * @flags: device flags * @real_dev: underlying netdevice * @dev_tracker: refcount tracker for @real_dev reference * @real_dev_addr: address of underlying netdevice * @dent: proc dir entry * @vlan_pcpu_stats: ptr to percpu rx stats * @netpoll: netpoll instance "propagated" down to @real_dev */ struct vlan_dev_priv { unsigned int nr_ingress_mappings; u32 ingress_priority_map[8]; unsigned int nr_egress_mappings; struct vlan_priority_tci_mapping *egress_priority_map[16]; __be16 vlan_proto; u16 vlan_id; u16 flags; struct net_device *real_dev; netdevice_tracker dev_tracker; unsigned char real_dev_addr[ETH_ALEN]; struct proc_dir_entry *dent; struct vlan_pcpu_stats __percpu *vlan_pcpu_stats; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif }; static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) { return netdev_priv(dev); } static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) { struct vlan_priority_tci_mapping *mp; smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; while (mp) { if (mp->priority == skprio) { return mp->vlan_qos; /* This should already be shifted * to mask correctly with the * VLAN's TCI */ } mp = mp->next; } return 0; } extern bool vlan_do_receive(struct sk_buff **skb); extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); extern int vlan_vids_add_by_dev(struct net_device *dev, const struct net_device *by_dev); extern void vlan_vids_del_by_dev(struct net_device *dev, const struct net_device *by_dev); extern bool vlan_uses_dev(const struct net_device *dev); #else static inline struct net_device * __vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id) { return NULL; } static inline int vlan_for_each(struct net_device *dev, int (*action)(struct net_device *dev, int vid, void *arg), void *arg) { return 0; } static inline struct net_device *vlan_dev_real_dev(const 
struct net_device *dev) { BUG(); return NULL; } static inline u16 vlan_dev_vlan_id(const struct net_device *dev) { BUG(); return 0; } static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev) { BUG(); return 0; } static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) { return 0; } static inline bool vlan_do_receive(struct sk_buff **skb) { return false; } static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) { return 0; } static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) { } static inline int vlan_vids_add_by_dev(struct net_device *dev, const struct net_device *by_dev) { return 0; } static inline void vlan_vids_del_by_dev(struct net_device *dev, const struct net_device *by_dev) { } static inline bool vlan_uses_dev(const struct net_device *dev) { return false; } #endif /** * eth_type_vlan - check for valid vlan ether type. * @ethertype: ether type to check * * Returns: true if the ether type is a vlan ether type. */ static inline bool eth_type_vlan(__be16 ethertype) { switch (ethertype) { case htons(ETH_P_8021Q): case htons(ETH_P_8021AD): return true; default: return false; } } static inline bool vlan_hw_offload_capable(netdev_features_t features, __be16 proto) { if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) return true; if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX) return true; return false; } /** * __vlan_insert_inner_tag - inner VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * @mac_len: MAC header length including outer vlan headers * * Inserts the VLAN tag into @skb as part of the payload at offset mac_len * Does not change skb->protocol so this function can be used during receive. * * Returns: error if skb_cow_head fails. */ static inline int __vlan_insert_inner_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci, unsigned int mac_len) { struct vlan_ethhdr *veth; if (skb_cow_head(skb, VLAN_HLEN) < 0) return -ENOMEM; skb_push(skb, VLAN_HLEN); /* Move the mac header sans proto to the beginning of the new header. */ if (likely(mac_len > ETH_TLEN)) memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); if (skb_mac_header_was_set(skb)) skb->mac_header -= VLAN_HLEN; veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); /* first, the ethernet type */ if (likely(mac_len >= ETH_TLEN)) { /* h_vlan_encapsulated_proto should already be populated, and * skb->data has space for h_vlan_proto */ veth->h_vlan_proto = vlan_proto; } else { /* h_vlan_encapsulated_proto should not be populated, and * skb->data has no space for h_vlan_proto */ veth->h_vlan_encapsulated_proto = skb->protocol; } /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); return 0; } /** * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload * Does not change skb->protocol so this function can be used during receive. * * Returns: error if skb_cow_head fails. 
*/ static inline int __vlan_insert_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); } /** * vlan_insert_inner_tag - inner VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * @mac_len: MAC header length including outer vlan headers * * Inserts the VLAN tag into @skb as part of the payload at offset mac_len * Returns a VLAN tagged skb. This might change skb->head. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. * * Does not change skb->protocol so this function can be used during receive. * * Return: modified @skb on success, NULL on error (@skb is freed). */ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci, unsigned int mac_len) { int err; err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); if (err) { dev_kfree_skb_any(skb); return NULL; } return skb; } /** * vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload * Returns a VLAN tagged skb. This might change skb->head. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. * * Does not change skb->protocol so this function can be used during receive. * * Return: modified @skb on success, NULL on error (@skb is freed). */ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); } /** * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload * Returns a VLAN tagged skb. This might change skb->head. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. * * Return: modified @skb on success, NULL on error (@skb is freed). */ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); if (skb) skb->protocol = vlan_proto; return skb; } /** * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info * @skb: skbuff to clear * * Clears the VLAN information from @skb */ static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb) { skb->vlan_all = 0; } /** * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb * @dst: skbuff to copy to * @src: skbuff to copy from * * Copies VLAN information from @src to @dst (for branchless code) */ static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) { dst->vlan_all = src->vlan_all; } /* * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag * * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. 
*/ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, skb_vlan_tag_get(skb)); if (likely(skb)) __vlan_hwaccel_clear_tag(skb); return skb; } /** * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { skb->vlan_proto = vlan_proto; skb->vlan_tci = vlan_tci; } /** * __vlan_get_tag - get the VLAN ID that is part of the payload * @skb: skbuff to query * @vlan_tci: buffer to store value * * Returns: error if the skb is not of VLAN type */ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); if (!eth_type_vlan(veth->h_vlan_proto)) return -ENODATA; *vlan_tci = ntohs(veth->h_vlan_TCI); return 0; } /** * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] * @skb: skbuff to query * @vlan_tci: buffer to store value * * Returns: error if @skb->vlan_tci is not set correctly */ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { if (skb_vlan_tag_present(skb)) { *vlan_tci = skb_vlan_tag_get(skb); return 0; } else { *vlan_tci = 0; return -ENODATA; } } /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query * @vlan_tci: buffer to store value * * Returns: error if the skb is not VLAN tagged */ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { return __vlan_hwaccel_get_tag(skb, vlan_tci); } else { return __vlan_get_tag(skb, vlan_tci); } } /** * __vlan_get_protocol_offset() - get protocol EtherType. * @skb: skbuff to query * @type: first vlan protocol * @mac_offset: MAC offset * @depth: buffer to store length of eth and vlan tags in bytes * * Returns: the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb, __be16 type, int mac_offset, int *depth) { unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at * ETH_HLEN otherwise */ if (eth_type_vlan(type)) { if (vlan_depth) { if (WARN_ON(vlan_depth < VLAN_HLEN)) return 0; vlan_depth -= VLAN_HLEN; } else { vlan_depth = ETH_HLEN; } do { struct vlan_hdr vhdr, *vh; vh = skb_header_pointer(skb, mac_offset + vlan_depth, sizeof(vhdr), &vhdr); if (unlikely(!vh || !--parse_depth)) return 0; type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (eth_type_vlan(type)); } if (depth) *depth = vlan_depth; return type; } static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, int *depth) { return __vlan_get_protocol_offset(skb, type, 0, depth); } /** * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query * * Returns: the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
*/ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } /* This version of __vlan_get_protocol() also pulls mac header in skb->head */ static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb, __be16 type, int *depth) { int maclen; type = __vlan_get_protocol(skb, type, &maclen); if (type) { if (!pskb_may_pull(skb, maclen)) type = 0; else if (depth) *depth = maclen; } return type; } /* A getter for the SKB protocol field which will handle VLAN tags consistently * whether VLAN acceleration is enabled or not. */ static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) { if (!skip_vlan) /* VLAN acceleration strips the VLAN header from the skb and * moves it to skb->vlan_proto */ return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; return vlan_get_protocol(skb); } static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { __be16 proto; unsigned short *rawp; /* * Was a VLAN packet, grab the encapsulated protocol, which the layer * three protocols care about. */ proto = vhdr->h_vlan_encapsulated_proto; if (eth_proto_is_802_3(proto)) { skb->protocol = proto; return; } rawp = (unsigned short *)(vhdr + 1); if (*rawp == 0xFFFF) /* * This is a magic hack to spot IPX packets. Older Novell * breaks the protocol design and runs IPX over 802.3 without * an 802.2 LLC layer. We look for FFFF which isn't a used * 802.2 SSAP/DSAP. This won't work for fault tolerant netware * but does for the rest. */ skb->protocol = htons(ETH_P_802_3); else /* * Real 802.2 LLC */ skb->protocol = htons(ETH_P_802_2); } /** * vlan_remove_tag - remove outer VLAN tag from payload * @skb: skbuff to remove tag from * @vlan_tci: buffer to store value * * Expects the skb to contain a VLAN tag in the payload, and to have skb->data * pointing at the MAC header. * * Returns: a new pointer to skb->data, or NULL on failure to pull. */ static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci) { struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); *vlan_tci = ntohs(vhdr->h_vlan_TCI); memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); vlan_set_encap_proto(skb, vhdr); return __skb_pull(skb, VLAN_HLEN); } /** * skb_vlan_tagged - check if skb is vlan tagged. * @skb: skbuff to query * * Returns: true if the skb is tagged, regardless of whether it is hardware * accelerated or not. */ static inline bool skb_vlan_tagged(const struct sk_buff *skb) { if (!skb_vlan_tag_present(skb) && likely(!eth_type_vlan(skb->protocol))) return false; return true; } /** * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers. * @skb: skbuff to query * * Returns: true if the skb is tagged with multiple vlan headers, regardless * of whether it is hardware accelerated or not. */ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) { __be16 protocol = skb->protocol; if (!skb_vlan_tag_present(skb)) { struct vlan_ethhdr *veh; if (likely(!eth_type_vlan(protocol))) return false; if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) return false; veh = skb_vlan_eth_hdr(skb); protocol = veh->h_vlan_encapsulated_proto; } if (!eth_type_vlan(protocol)) return false; return true; } /** * vlan_features_check - drop unsafe features for skb with multiple tags. * @skb: skbuff to query * @features: features to be checked * * Returns: features without unsafe ones if the skb has multiple tags. 
*/ static inline netdev_features_t vlan_features_check(struct sk_buff *skb, netdev_features_t features) { if (skb_vlan_tagged_multi(skb)) { /* In the case of multi-tagged packets, use a direct mask * instead of using netdev_intersect_features(), to make * sure that only devices supporting NETIF_F_HW_CSUM will * have checksum offloading support. */ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } return features; } /** * compare_vlan_header - Compare two vlan headers * @h1: Pointer to vlan header * @h2: Pointer to vlan header * * Compare two vlan headers. * * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits. * * Return: 0 if equal, arbitrary non-zero value if not equal. */ static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1, const struct vlan_hdr *h2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return *(u32 *)h1 ^ *(u32 *)h2; #else return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | ((__force u32)h1->h_vlan_encapsulated_proto ^ (__force u32)h2->h_vlan_encapsulated_proto); #endif } #endif /* !(_LINUX_IF_VLAN_H_) */
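As a quick illustration of the helpers in this header (a sketch only; the function name is hypothetical), reading the 12-bit VLAN ID works the same way whether the tag was accelerated into skb metadata or is still in the payload:

#include <linux/if_vlan.h>

static inline int example_get_vid(const struct sk_buff *skb, u16 *vid)
{
	u16 tci;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* Accelerated path: the tag was moved into skb->vlan_tci. */
		tci = skb_vlan_tag_get(skb);
	} else {
		/* Non-accelerated path: the tag is still after the MAC header. */
		err = __vlan_get_tag(skb, &tci);
		if (err)
			return err;
	}

	*vid = tci & VLAN_VID_MASK;	/* drop the PCP/DEI bits, keep the VID */
	return 0;
}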
// SPDX-License-Identifier: GPL-2.0 /* * Released under the GPLv2 only.
*/ #include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/usb/hcd.h> #include <linux/usb/quirks.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string_choices.h> #include <linux/device.h> #include <asm/byteorder.h> #include "usb.h" #define USB_MAXALTSETTING 128 /* Hard limit */ #define USB_MAXCONFIG 8 /* Arbitrary limit */ static int find_next_descriptor(unsigned char *buffer, int size, int dt1, int dt2, int *num_skipped) { struct usb_descriptor_header *h; int n = 0; unsigned char *buffer0 = buffer; /* Find the next descriptor of type dt1 or dt2 */ while (size > 0) { h = (struct usb_descriptor_header *) buffer; if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2) break; buffer += h->bLength; size -= h->bLength; ++n; } /* Store the number of descriptors skipped and return the * number of bytes skipped */ if (num_skipped) *num_skipped = n; return buffer - buffer0; } static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_endpoint *ep, unsigned char *buffer, int size) { struct usb_ssp_isoc_ep_comp_descriptor *desc; /* * The SuperSpeedPlus Isoc endpoint companion descriptor immediately * follows the SuperSpeed Endpoint Companion descriptor */ desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP || size < USB_DT_SSP_ISOC_EP_COMP_SIZE) { dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion" "for config %d interface %d altsetting %d ep %d.\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); return; } memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE); } static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_endpoint *ep, unsigned char *buffer, int size) { struct usb_ss_ep_comp_descriptor *desc; int max_tx; /* The SuperSpeed endpoint companion descriptor is supposed to * be the first thing immediately following the endpoint descriptor. */ desc = (struct usb_ss_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || size < USB_DT_SS_EP_COMP_SIZE) { dev_notice(ddev, "No SuperSpeed endpoint companion for config %d " " interface %d altsetting %d ep %d: " "using minimum values\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); /* Fill in some default values. * Leave bmAttributes as zero, which will mean no streams for * bulk, and isoc won't support multiple bursts of packets. * With bursts of only one packet, and a Mult of 1, the max * amount of data moved per endpoint service interval is one * packet. 
*/ ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE; ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; if (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) ep->ss_ep_comp.wBytesPerInterval = ep->desc.wMaxPacketSize; return; } buffer += desc->bLength; size -= desc->bLength; memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); /* Check the various values */ if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { dev_notice(ddev, "Control endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 0; } else if (desc->bMaxBurst > 15) { dev_notice(ddev, "Endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to 15\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 15; } if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) && desc->bmAttributes != 0) { dev_notice(ddev, "%s endpoint with bmAttributes = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk", desc->bmAttributes, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 0; } else if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) { dev_notice(ddev, "Bulk endpoint with more than 65536 streams in " "config %d interface %d altsetting %d ep %d: " "setting to max\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 16; } else if (usb_endpoint_xfer_isoc(&ep->desc) && !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) && USB_SS_MULT(desc->bmAttributes) > 3) { dev_notice(ddev, "Isoc endpoint has Mult of %d in " "config %d interface %d altsetting %d ep %d: " "setting to 3\n", USB_SS_MULT(desc->bmAttributes), cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 2; } if (usb_endpoint_xfer_isoc(&ep->desc)) max_tx = (desc->bMaxBurst + 1) * (USB_SS_MULT(desc->bmAttributes)) * usb_endpoint_maxp(&ep->desc); else if (usb_endpoint_xfer_int(&ep->desc)) max_tx = usb_endpoint_maxp(&ep->desc) * (desc->bMaxBurst + 1); else max_tx = 999999; if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in " "config %d interface %d altsetting %d ep %d: " "setting to %d\n", usb_endpoint_xfer_isoc(&ep->desc) ? 
"Isoc" : "Int", le16_to_cpu(desc->wBytesPerInterval), cfgno, inum, asnum, ep->desc.bEndpointAddress, max_tx); ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); } /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */ if (usb_endpoint_xfer_isoc(&ep->desc) && USB_SS_SSP_ISOC_COMP(desc->bmAttributes)) usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum, ep, buffer, size); } static const unsigned short low_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 8, [USB_ENDPOINT_XFER_ISOC] = 0, [USB_ENDPOINT_XFER_BULK] = 0, [USB_ENDPOINT_XFER_INT] = 8, }; static const unsigned short full_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1023, [USB_ENDPOINT_XFER_BULK] = 64, [USB_ENDPOINT_XFER_INT] = 64, }; static const unsigned short high_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1024, /* Bulk should be 512, but some devices use 1024: we will warn below */ [USB_ENDPOINT_XFER_BULK] = 1024, [USB_ENDPOINT_XFER_INT] = 1024, }; static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 512, [USB_ENDPOINT_XFER_ISOC] = 1024, [USB_ENDPOINT_XFER_BULK] = 1024, [USB_ENDPOINT_XFER_INT] = 1024, }; static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1, struct usb_endpoint_descriptor *e2) { if (e1->bEndpointAddress == e2->bEndpointAddress) return true; if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) { if (usb_endpoint_num(e1) == usb_endpoint_num(e2)) return true; } return false; } /* * Check for duplicate endpoint addresses in other interfaces and in the * altsetting currently being parsed. */ static bool config_endpoint_is_duplicate(struct usb_host_config *config, int inum, int asnum, struct usb_endpoint_descriptor *d) { struct usb_endpoint_descriptor *epd; struct usb_interface_cache *intfc; struct usb_host_interface *alt; int i, j, k; for (i = 0; i < config->desc.bNumInterfaces; ++i) { intfc = config->intf_cache[i]; for (j = 0; j < intfc->num_altsetting; ++j) { alt = &intfc->altsetting[j]; if (alt->desc.bInterfaceNumber == inum && alt->desc.bAlternateSetting != asnum) continue; for (k = 0; k < alt->desc.bNumEndpoints; ++k) { epd = &alt->endpoint[k].desc; if (endpoint_is_duplicate(epd, d)) return true; } } } return false; } static int usb_parse_endpoint(struct device *ddev, int cfgno, struct usb_host_config *config, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) { struct usb_device *udev = to_usb_device(ddev); unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i, j, retval; unsigned int maxp; const unsigned short *maxpacket_maxes; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE) n = USB_DT_ENDPOINT_AUDIO_SIZE; else if (d->bLength >= USB_DT_ENDPOINT_SIZE) n = USB_DT_ENDPOINT_SIZE; else { dev_notice(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint descriptor of length %d, skipping\n", cfgno, inum, asnum, d->bLength); goto skip_to_next_endpoint_or_interface_descriptor; } i = d->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; if (i == 0) { dev_notice(ddev, "config %d interface %d altsetting %d has an " "invalid descriptor for endpoint zero, skipping\n", cfgno, inum, asnum); goto skip_to_next_endpoint_or_interface_descriptor; } /* Only store as many endpoints as we have room for */ if (ifp->desc.bNumEndpoints >= 
num_ep) goto skip_to_next_endpoint_or_interface_descriptor; /* Save a copy of the descriptor and use it instead of the original */ endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; memcpy(&endpoint->desc, d, n); d = &endpoint->desc; /* Clear the reserved bits in bEndpointAddress */ i = d->bEndpointAddress & (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK); if (i != d->bEndpointAddress) { dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n", cfgno, inum, asnum, d->bEndpointAddress, i); endpoint->desc.bEndpointAddress = i; } /* Check for duplicate endpoint addresses */ if (config_endpoint_is_duplicate(config, inum, asnum, d)) { dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } /* Ignore some endpoints */ if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) { if (usb_endpoint_is_ignored(udev, ifp, d)) { dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } } /* Accept this endpoint */ ++ifp->desc.bNumEndpoints; INIT_LIST_HEAD(&endpoint->urb_list); /* * Fix up bInterval values outside the legal range. * Use 10 or 8 ms if no proper value can be guessed. */ i = 0; /* i = min, j = max, n = default */ j = 255; if (usb_endpoint_xfer_int(d)) { i = 1; switch (udev->speed) { case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: case USB_SPEED_HIGH: /* * Many device manufacturers are using full-speed * bInterval values in high-speed interrupt endpoint * descriptors. Try to fix those and fall back to an * 8-ms default value otherwise. */ n = fls(d->bInterval*8); if (n == 0) n = 7; /* 8 ms = 2^(7-1) uframes */ j = 16; /* * Adjust bInterval for quirked devices. */ /* * This quirk fixes bIntervals reported in ms. */ if (udev->quirks & USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { n = clamp(fls(d->bInterval) + 3, i, j); i = j = n; } /* * This quirk fixes bIntervals reported in * linear microframes. */ if (udev->quirks & USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) { n = clamp(fls(d->bInterval), i, j); i = j = n; } break; default: /* USB_SPEED_FULL or _LOW */ /* * For low-speed, 10 ms is the official minimum. * But some "overclocked" devices might want faster * polling so we'll allow it. */ n = 10; break; } } else if (usb_endpoint_xfer_isoc(d)) { i = 1; j = 16; switch (udev->speed) { case USB_SPEED_HIGH: n = 7; /* 8 ms = 2^(7-1) uframes */ break; default: /* USB_SPEED_FULL */ n = 4; /* 8 ms = 2^(4-1) frames */ break; } } if (d->bInterval < i || d->bInterval > j) { dev_notice(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X has an invalid bInterval %d, " "changing to %d\n", cfgno, inum, asnum, d->bEndpointAddress, d->bInterval, n); endpoint->desc.bInterval = n; } /* Some buggy low-speed devices have Bulk endpoints, which is * explicitly forbidden by the USB spec. In an attempt to make * them usable, we will try treating them as Interrupt endpoints. 
*/ if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) { dev_notice(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X is Bulk; changing to Interrupt\n", cfgno, inum, asnum, d->bEndpointAddress); endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT; endpoint->desc.bInterval = 1; if (usb_endpoint_maxp(&endpoint->desc) > 8) endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } /* * Validate the wMaxPacketSize field. * Some devices have isochronous endpoints in altsetting 0; * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 * (see the end of section 5.6.3), so don't warn about them. */ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize); if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); } /* Find the highest legal maxpacket size for this endpoint */ i = 0; /* additional transactions per microframe */ switch (udev->speed) { case USB_SPEED_LOW: maxpacket_maxes = low_speed_maxpacket_maxes; break; case USB_SPEED_FULL: maxpacket_maxes = full_speed_maxpacket_maxes; break; case USB_SPEED_HIGH: /* Multiple-transactions bits are allowed only for HS periodic endpoints */ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { i = maxp & USB_EP_MAXP_MULT_MASK; maxp &= ~i; } fallthrough; default: maxpacket_maxes = high_speed_maxpacket_maxes; break; case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS: maxpacket_maxes = super_speed_maxpacket_maxes; break; } j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; if (maxp > j) { dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp, j); maxp = j; endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); } /* * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. High speed HCDs may not * be able to handle that particular bug, so let's warn... 
*/ if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { if (maxp != 512) dev_notice(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp); } /* Parse a possible SuperSpeed endpoint companion descriptor */ if (udev->speed >= USB_SPEED_SUPER) usb_parse_ss_endpoint_companion(ddev, cfgno, inum, asnum, endpoint, buffer, size); /* Skip over any Class Specific or Vendor Specific descriptors; * find the next endpoint or interface descriptor */ endpoint->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); endpoint->extralen = i; retval = buffer - buffer0 + i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, str_plural(n), "endpoint"); return retval; skip_to_next_endpoint_or_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } void usb_release_interface_cache(struct kref *ref) { struct usb_interface_cache *intfc = ref_to_usb_interface_cache(ref); int j; for (j = 0; j < intfc->num_altsetting; j++) { struct usb_host_interface *alt = &intfc->altsetting[j]; kfree(alt->endpoint); kfree(alt->string); } kfree(intfc); } static int usb_parse_interface(struct device *ddev, int cfgno, struct usb_host_config *config, unsigned char *buffer, int size, u8 inums[], u8 nalts[]) { unsigned char *buffer0 = buffer; struct usb_interface_descriptor *d; int inum, asnum; struct usb_interface_cache *intfc; struct usb_host_interface *alt; int i, n; int len, retval; int num_ep, num_ep_orig; d = (struct usb_interface_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength < USB_DT_INTERFACE_SIZE) goto skip_to_next_interface_descriptor; /* Which interface entry is this? */ intfc = NULL; inum = d->bInterfaceNumber; for (i = 0; i < config->desc.bNumInterfaces; ++i) { if (inums[i] == inum) { intfc = config->intf_cache[i]; break; } } if (!intfc || intfc->num_altsetting >= nalts[i]) goto skip_to_next_interface_descriptor; /* Check for duplicate altsetting entries */ asnum = d->bAlternateSetting; for ((i = 0, alt = &intfc->altsetting[0]); i < intfc->num_altsetting; (++i, ++alt)) { if (alt->desc.bAlternateSetting == asnum) { dev_notice(ddev, "Duplicate descriptor for config %d " "interface %d altsetting %d, skipping\n", cfgno, inum, asnum); goto skip_to_next_interface_descriptor; } } ++intfc->num_altsetting; memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE); /* Skip over any Class Specific or Vendor Specific descriptors; * find the first endpoint or interface descriptor */ alt->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); alt->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, str_plural(n), "interface"); buffer += i; size -= i; /* Allocate space for the right(?) 
number of endpoints */ num_ep = num_ep_orig = alt->desc.bNumEndpoints; alt->desc.bNumEndpoints = 0; /* Use as a counter */ if (num_ep > USB_MAXENDPOINTS) { dev_notice(ddev, "too many endpoints for config %d interface %d " "altsetting %d: %d, using maximum allowed: %d\n", cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS); num_ep = USB_MAXENDPOINTS; } if (num_ep > 0) { /* Can't allocate 0 bytes */ len = sizeof(struct usb_host_endpoint) * num_ep; alt->endpoint = kzalloc(len, GFP_KERNEL); if (!alt->endpoint) return -ENOMEM; } /* Parse all the endpoint descriptors */ n = 0; while (size > 0) { if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum, alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; buffer += retval; size -= retval; } if (n != num_ep_orig) dev_notice(ddev, "config %d interface %d altsetting %d has %d " "endpoint descriptor%s, different from the interface " "descriptor's value: %d\n", cfgno, inum, asnum, n, str_plural(n), num_ep_orig); return buffer - buffer0; skip_to_next_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_INTERFACE, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } static int usb_parse_configuration(struct usb_device *dev, int cfgidx, struct usb_host_config *config, unsigned char *buffer, int size) { struct device *ddev = &dev->dev; unsigned char *buffer0 = buffer; int cfgno; int nintf, nintf_orig; int i, j, n; struct usb_interface_cache *intfc; unsigned char *buffer2; int size2; struct usb_descriptor_header *header; int retval; u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES]; unsigned iad_num = 0; memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); nintf = nintf_orig = config->desc.bNumInterfaces; config->desc.bNumInterfaces = 0; // Adjusted later if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE || config->desc.bLength > size) { dev_notice(ddev, "invalid descriptor for config index %d: " "type = 0x%X, length = %d\n", cfgidx, config->desc.bDescriptorType, config->desc.bLength); return -EINVAL; } cfgno = config->desc.bConfigurationValue; buffer += config->desc.bLength; size -= config->desc.bLength; if (nintf > USB_MAXINTERFACES) { dev_notice(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", cfgno, nintf, USB_MAXINTERFACES); nintf = USB_MAXINTERFACES; } /* Go through the descriptors, checking their length and counting the * number of altsettings for each interface */ n = 0; for ((buffer2 = buffer, size2 = size); size2 > 0; (buffer2 += header->bLength, size2 -= header->bLength)) { if (size2 < sizeof(struct usb_descriptor_header)) { dev_notice(ddev, "config %d descriptor has %d excess " "byte%s, ignoring\n", cfgno, size2, str_plural(size2)); break; } header = (struct usb_descriptor_header *) buffer2; if ((header->bLength > size2) || (header->bLength < 2)) { dev_notice(ddev, "config %d has an invalid descriptor " "of length %d, skipping remainder of the config\n", cfgno, header->bLength); break; } if (header->bDescriptorType == USB_DT_INTERFACE) { struct usb_interface_descriptor *d; int inum; d = (struct usb_interface_descriptor *) header; if (d->bLength < USB_DT_INTERFACE_SIZE) { dev_notice(ddev, "config %d has an invalid " "interface descriptor of length %d, " "skipping\n", cfgno, d->bLength); continue; } inum = d->bInterfaceNumber; if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) && n >= nintf_orig) { dev_notice(ddev, "config %d has more interface " 
"descriptors, than it declares in " "bNumInterfaces, ignoring interface " "number: %d\n", cfgno, inum); continue; } if (inum >= nintf_orig) dev_notice(ddev, "config %d has an invalid " "interface number: %d but max is %d\n", cfgno, inum, nintf_orig - 1); /* Have we already encountered this interface? * Count its altsettings */ for (i = 0; i < n; ++i) { if (inums[i] == inum) break; } if (i < n) { if (nalts[i] < 255) ++nalts[i]; } else if (n < USB_MAXINTERFACES) { inums[n] = inum; nalts[n] = 1; ++n; } } else if (header->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { struct usb_interface_assoc_descriptor *d; d = (struct usb_interface_assoc_descriptor *)header; if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { dev_notice(ddev, "config %d has an invalid interface association descriptor of length %d, skipping\n", cfgno, d->bLength); continue; } if (iad_num == USB_MAXIADS) { dev_notice(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); } else { config->intf_assoc[iad_num] = d; iad_num++; } } else if (header->bDescriptorType == USB_DT_DEVICE || header->bDescriptorType == USB_DT_CONFIG) dev_notice(ddev, "config %d contains an unexpected " "descriptor of type 0x%X, skipping\n", cfgno, header->bDescriptorType); } /* for ((buffer2 = buffer, size2 = size); ...) */ size = buffer2 - buffer; config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0); if (n != nintf) dev_notice(ddev, "config %d has %d interface%s, different from " "the descriptor's value: %d\n", cfgno, n, str_plural(n), nintf_orig); else if (n == 0) dev_notice(ddev, "config %d has no interfaces?\n", cfgno); config->desc.bNumInterfaces = nintf = n; /* Check for missing interface numbers */ for (i = 0; i < nintf; ++i) { for (j = 0; j < nintf; ++j) { if (inums[j] == i) break; } if (j >= nintf) dev_notice(ddev, "config %d has no interface number " "%d\n", cfgno, i); } /* Allocate the usb_interface_caches and altsetting arrays */ for (i = 0; i < nintf; ++i) { j = nalts[i]; if (j > USB_MAXALTSETTING) { dev_notice(ddev, "too many alternate settings for " "config %d interface %d: %d, " "using maximum allowed: %d\n", cfgno, inums[i], j, USB_MAXALTSETTING); nalts[i] = j = USB_MAXALTSETTING; } intfc = kzalloc(struct_size(intfc, altsetting, j), GFP_KERNEL); config->intf_cache[i] = intfc; if (!intfc) return -ENOMEM; kref_init(&intfc->ref); } /* FIXME: parse the BOS descriptor */ /* Skip over any Class Specific or Vendor Specific descriptors; * find the first interface descriptor */ config->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_INTERFACE, USB_DT_INTERFACE, &n); config->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, str_plural(n), "configuration"); buffer += i; size -= i; /* Parse all the interface/altsetting descriptors */ while (size > 0) { retval = usb_parse_interface(ddev, cfgno, config, buffer, size, inums, nalts); if (retval < 0) return retval; buffer += retval; size -= retval; } /* Check for missing altsettings */ for (i = 0; i < nintf; ++i) { intfc = config->intf_cache[i]; for (j = 0; j < intfc->num_altsetting; ++j) { for (n = 0; n < intfc->num_altsetting; ++n) { if (intfc->altsetting[n].desc. bAlternateSetting == j) break; } if (n >= intfc->num_altsetting) dev_notice(ddev, "config %d interface %d has no " "altsetting %d\n", cfgno, inums[i], j); } } return 0; } /* hub-only!! ... and only exported for reset/reinit path. 
* otherwise used internally on disconnect/destroy path */ void usb_destroy_configuration(struct usb_device *dev) { int c, i; if (!dev->config) return; if (dev->rawdescriptors) { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) kfree(dev->rawdescriptors[i]); kfree(dev->rawdescriptors); dev->rawdescriptors = NULL; } for (c = 0; c < dev->descriptor.bNumConfigurations; c++) { struct usb_host_config *cf = &dev->config[c]; kfree(cf->string); for (i = 0; i < cf->desc.bNumInterfaces; i++) { if (cf->intf_cache[i]) kref_put(&cf->intf_cache[i]->ref, usb_release_interface_cache); } } kfree(dev->config); dev->config = NULL; } /* * Get the USB config descriptors, cache and parse'em * * hub-only!! ... and only in reset path, or usb_new_device() * (used by real hubs and virtual root hubs) */ int usb_get_configuration(struct usb_device *dev) { struct device *ddev = &dev->dev; int ncfg = dev->descriptor.bNumConfigurations; unsigned int cfgno, length; unsigned char *bigbuffer; struct usb_config_descriptor *desc; int result; if (ncfg > USB_MAXCONFIG) { dev_notice(ddev, "too many configurations: %d, " "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG); dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; } if (ncfg < 1) { dev_err(ddev, "no configurations\n"); return -EINVAL; } length = ncfg * sizeof(struct usb_host_config); dev->config = kzalloc(length, GFP_KERNEL); if (!dev->config) return -ENOMEM; length = ncfg * sizeof(char *); dev->rawdescriptors = kzalloc(length, GFP_KERNEL); if (!dev->rawdescriptors) return -ENOMEM; desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL); if (!desc) return -ENOMEM; for (cfgno = 0; cfgno < ncfg; cfgno++) { /* We grab just the first descriptor so we know how long * the whole configuration is */ result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, desc, USB_DT_CONFIG_SIZE); if (result < 0) { dev_err(ddev, "unable to read config index %d " "descriptor/%s: %d\n", cfgno, "start", result); if (result != -EPIPE) goto err; dev_notice(ddev, "chopping to %d config(s)\n", cfgno); dev->descriptor.bNumConfigurations = cfgno; break; } else if (result < 4) { dev_err(ddev, "config index %d descriptor too short " "(expected %i, got %i)\n", cfgno, USB_DT_CONFIG_SIZE, result); result = -EINVAL; goto err; } length = max_t(int, le16_to_cpu(desc->wTotalLength), USB_DT_CONFIG_SIZE); /* Now that we know the length, get the whole thing */ bigbuffer = kmalloc(length, GFP_KERNEL); if (!bigbuffer) { result = -ENOMEM; goto err; } if (dev->quirks & USB_QUIRK_DELAY_INIT) msleep(200); result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, bigbuffer, length); if (result < 0) { dev_err(ddev, "unable to read config index %d " "descriptor/%s\n", cfgno, "all"); kfree(bigbuffer); goto err; } if (result < length) { dev_notice(ddev, "config index %d descriptor too short " "(expected %i, got %i)\n", cfgno, length, result); length = result; } dev->rawdescriptors[cfgno] = bigbuffer; result = usb_parse_configuration(dev, cfgno, &dev->config[cfgno], bigbuffer, length); if (result < 0) { ++cfgno; goto err; } } err: kfree(desc); dev->descriptor.bNumConfigurations = cfgno; return result; } void usb_release_bos_descriptor(struct usb_device *dev) { if (dev->bos) { kfree(dev->bos->desc); kfree(dev->bos); dev->bos = NULL; } } static const __u8 bos_desc_len[256] = { [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE, [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE, [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE, [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1), [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE, 
[USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE, }; /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; struct usb_ssp_cap_descriptor *ssp_cap; unsigned char *buffer, *buffer0; int length, total_len, num, i, ssac; __u8 cap_type; int ret; bos = kzalloc(sizeof(*bos), GFP_KERNEL); if (!bos) return -ENOMEM; /* Get BOS descriptor */ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE); if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) { dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n"); if (ret >= 0) ret = -ENOMSG; kfree(bos); return ret; } length = bos->bLength; total_len = le16_to_cpu(bos->wTotalLength); num = bos->bNumDeviceCaps; kfree(bos); if (total_len < length) return -EINVAL; dev->bos = kzalloc(sizeof(*dev->bos), GFP_KERNEL); if (!dev->bos) return -ENOMEM; /* Now let's get the whole BOS descriptor set */ buffer = kzalloc(total_len, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; goto err; } dev->bos->desc = (struct usb_bos_descriptor *)buffer; ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len); if (ret < total_len) { dev_notice(ddev, "unable to get BOS descriptor set\n"); if (ret >= 0) ret = -ENOMSG; goto err; } buffer0 = buffer; total_len -= length; buffer += length; for (i = 0; i < num; i++) { cap = (struct usb_dev_cap_header *)buffer; if (total_len < sizeof(*cap) || total_len < cap->bLength) { dev->bos->desc->bNumDeviceCaps = i; break; } cap_type = cap->bDevCapabilityType; length = cap->bLength; if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) { dev->bos->desc->bNumDeviceCaps = i; break; } if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { dev_notice(ddev, "descriptor type invalid, skip\n"); goto skip_to_next_descriptor; } switch (cap_type) { case USB_CAP_TYPE_EXT: dev->bos->ext_cap = (struct usb_ext_cap_descriptor *)buffer; break; case USB_SS_CAP_TYPE: dev->bos->ss_cap = (struct usb_ss_cap_descriptor *)buffer; break; case USB_SSP_CAP_TYPE: ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; ssac = (le32_to_cpu(ssp_cap->bmAttributes) & USB_SSP_SUBLINK_SPEED_ATTRIBS); if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) dev->bos->ssp_cap = ssp_cap; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = (struct usb_ss_container_id_descriptor *)buffer; break; case USB_PTM_CAP_TYPE: dev->bos->ptm_cap = (struct usb_ptm_cap_descriptor *)buffer; break; default: break; } skip_to_next_descriptor: total_len -= length; buffer += length; } dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0); return 0; err: usb_release_bos_descriptor(dev); return ret; }
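/*
 * Illustrative sketch, not part of the kernel file above: usb_parse_configuration()
 * and find_next_descriptor() walk the raw configuration buffer by hopping
 * bLength bytes at a time and dispatching on bDescriptorType.  The standalone
 * user-space fragment below mirrors that walk with a locally defined header
 * struct; the names desc_header and walk_descriptors are invented for the
 * example and the sample bytes are fabricated.
 */
#include <stdint.h>
#include <stdio.h>

struct desc_header {			/* mirrors struct usb_descriptor_header */
	uint8_t bLength;
	uint8_t bDescriptorType;
} __attribute__((packed));

static void walk_descriptors(const uint8_t *buf, int size)
{
	while (size >= (int)sizeof(struct desc_header)) {
		const struct desc_header *h = (const void *)buf;

		/* A zero or oversized bLength would loop forever or overrun
		 * the buffer, so stop, as the kernel parser above does when
		 * it meets an invalid descriptor. */
		if (h->bLength < 2 || h->bLength > size)
			break;

		printf("type 0x%02x, length %u\n",
		       (unsigned int)h->bDescriptorType,
		       (unsigned int)h->bLength);
		buf += h->bLength;
		size -= h->bLength;
	}
}

int main(void)
{
	/* Fabricated data: config header, one interface, one bulk-IN endpoint. */
	static const uint8_t cfg[] = {
		9, 0x02, 25, 0, 1, 1, 0, 0x80, 50,	/* CONFIGURATION */
		9, 0x04, 0, 0, 1, 0xff, 0, 0, 0,	/* INTERFACE */
		7, 0x05, 0x81, 0x02, 0x00, 0x02, 0,	/* ENDPOINT 0x81, bulk */
	};

	walk_descriptors(cfg, sizeof(cfg));
	return 0;
}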
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPINLOCK_H #define __LINUX_SPINLOCK_H #define __LINUX_INSIDE_SPINLOCK_H /* * include/linux/spinlock.h - generic spinlock/rwlock declarations * * here's the role of the various spinlock/rwlock related include files: * * on SMP builds: * * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * * linux/spinlock_types_raw: * The raw types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) * * linux/spinlock_api_smp.h: * contains the prototypes for the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. * * on UP builds: * * linux/spinlock_type_up.h: * contains the generic, simplified UP spinlock type. * (which is an empty structure on non-debug builds) * * linux/spinlock_types_raw: * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * * linux/spinlock_up.h: * contains the arch_spin_*()/etc.
version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * * (included on UP-non-debug builds:) * * linux/spinlock_api_up.h: * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. */ #include <linux/typecheck.h> #include <linux/preempt.h> #include <linux/linkage.h> #include <linux/compiler.h> #include <linux/irqflags.h> #include <linux/thread_info.h> #include <linux/stringify.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/barrier.h> #include <asm/mmiowb.h> /* * Must define these before including other files, inline functions need them */ #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME #define LOCK_SECTION_START(extra) \ ".subsection 1\n\t" \ extra \ ".ifndef " LOCK_SECTION_NAME "\n\t" \ LOCK_SECTION_NAME ":\n\t" \ ".endif\n" #define LOCK_SECTION_END \ ".previous\n\t" #define __lockfunc __section(".spinlock.text") /* * Pull the arch_spinlock_t and arch_rwlock_t definitions: */ #include <linux/spinlock_types.h> /* * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them): */ #ifdef CONFIG_SMP # include <asm/spinlock.h> #else # include <linux/spinlock_up.h> #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, struct lock_class_key *key, short inner); # define raw_spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \ } while (0) #else # define raw_spin_lock_init(lock) \ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) #endif #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef arch_spin_is_contended #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ /* * smp_mb__after_spinlock() provides the equivalent of a full memory barrier * between program-order earlier lock acquisitions and program-order later * memory accesses. * * This guarantees that the following two properties hold: * * 1) Given the snippet: * * { X = 0; Y = 0; } * * CPU0 CPU1 * * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); * spin_lock(S); smp_mb(); * smp_mb__after_spinlock(); r1 = READ_ONCE(X); * r0 = READ_ONCE(Y); * spin_unlock(S); * * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments * preceding the call to smp_mb__after_spinlock() in __schedule() and in * try_to_wake_up(). * * 2) Given the snippet: * * { X = 0; Y = 0; } * * CPU0 CPU1 CPU2 * * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); * WRITE_ONCE(Y, 1); * spin_unlock(S); * * it is forbidden that CPU0's critical section executes before CPU1's * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments * preceding the calls to smp_rmb() in try_to_wake_up() for similar * snippets but "projected" onto two CPUs. * * Property (2) upgrades the lock to an RCsc lock. * * Since most load-store architectures implement ACQUIRE with an smp_mb() after * the LL/SC loop, they need no further barriers. Similarly all our TSO * architectures imply an smp_mb() for each atomic instruction and equally don't * need more. * * Architectures that can implement ACQUIRE better need to take care. 
*/ #ifndef smp_mb__after_spinlock #define smp_mb__after_spinlock() kcsan_mb() #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); extern int do_raw_spin_trylock(raw_spinlock_t *lock); extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); #else static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) { __acquire(lock); arch_spin_lock(&lock->raw_lock); mmiowb_spin_lock(); } static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { int ret = arch_spin_trylock(&(lock)->raw_lock); if (ret) mmiowb_spin_lock(); return ret; } static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) { mmiowb_spin_unlock(); arch_spin_unlock(&lock->raw_lock); __release(lock); } #endif /* * Define the various spin_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The * various methods are defined as nops in the case they are not * required. */ #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) #define raw_spin_lock(lock) _raw_spin_lock(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else /* * Always evaluate the 'subclass' argument to avoid that the compiler * warns about set-but-not-used variables when building with * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. */ # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave(lock); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ } while (0) #else #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave(lock); \ } while (0) #endif #else #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_spin_lock_irqsave(lock, flags); \ } while (0) #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ raw_spin_lock_irqsave(lock, flags) #endif #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) #define raw_spin_unlock(lock) _raw_spin_unlock(lock) #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) #define raw_spin_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_spin_unlock_irqrestore(lock, flags); \ } while (0) #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) #define raw_spin_trylock_bh(lock) \ __cond_lock(lock, _raw_spin_trylock_bh(lock)) #define raw_spin_trylock_irq(lock) \ ({ \ local_irq_disable(); \ raw_spin_trylock(lock) ? \ 1 : ({ local_irq_enable(); 0; }); \ }) #define raw_spin_trylock_irqsave(lock, flags) \ ({ \ local_irq_save(flags); \ raw_spin_trylock(lock) ? 
\ 1 : ({ local_irq_restore(flags); 0; }); \ }) #ifndef CONFIG_PREEMPT_RT /* Include rwlock functions for !RT */ #include <linux/rwlock.h> #endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: */ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) # include <linux/spinlock_api_smp.h> #else # include <linux/spinlock_api_up.h> #endif /* Non PREEMPT_RT kernel, map to raw spinlocks: */ #ifndef CONFIG_PREEMPT_RT /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) { return &lock->rlock; } #ifdef CONFIG_DEBUG_SPINLOCK # define spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ __raw_spin_lock_init(spinlock_check(lock), \ #lock, &__key, LD_WAIT_CONFIG); \ } while (0) #else # define spin_lock_init(_lock) \ do { \ spinlock_check(_lock); \ *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \ } while (0) #endif static __always_inline void spin_lock(spinlock_t *lock) { raw_spin_lock(&lock->rlock); } static __always_inline void spin_lock_bh(spinlock_t *lock) { raw_spin_lock_bh(&lock->rlock); } static __always_inline int spin_trylock(spinlock_t *lock) { return raw_spin_trylock(&lock->rlock); } #define spin_lock_nested(lock, subclass) \ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ } while (0) static __always_inline void spin_lock_irq(spinlock_t *lock) { raw_spin_lock_irq(&lock->rlock); } #define spin_lock_irqsave(lock, flags) \ do { \ raw_spin_lock_irqsave(spinlock_check(lock), flags); \ } while (0) #define spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ } while (0) static __always_inline void spin_unlock(spinlock_t *lock) { raw_spin_unlock(&lock->rlock); } static __always_inline void spin_unlock_bh(spinlock_t *lock) { raw_spin_unlock_bh(&lock->rlock); } static __always_inline void spin_unlock_irq(spinlock_t *lock) { raw_spin_unlock_irq(&lock->rlock); } static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { raw_spin_unlock_irqrestore(&lock->rlock, flags); } static __always_inline int spin_trylock_bh(spinlock_t *lock) { return raw_spin_trylock_bh(&lock->rlock); } static __always_inline int spin_trylock_irq(spinlock_t *lock) { return raw_spin_trylock_irq(&lock->rlock); } #define spin_trylock_irqsave(lock, flags) \ ({ \ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) /** * spin_is_locked() - Check whether a spinlock is locked. * @lock: Pointer to the spinlock. * * This function is NOT required to provide any memory ordering * guarantees; it could be used for debugging purposes or, when * additional synchronization is needed, accompanied with other * constructs (memory barriers) enforcing the synchronization. * * Returns: 1 if @lock is locked, 0 otherwise. * * Note that the function only tells you that the spinlock is * seen to be locked, not that it is locked on your CPU. * * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n, * the return value is always 0 (see include/linux/spinlock_up.h). * Therefore you should not rely heavily on the return value. 
*/ static __always_inline int spin_is_locked(spinlock_t *lock) { return raw_spin_is_locked(&lock->rlock); } static __always_inline int spin_is_contended(spinlock_t *lock) { return raw_spin_is_contended(&lock->rlock); } #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) #else /* !CONFIG_PREEMPT_RT */ # include <linux/spinlock_rt.h> #endif /* CONFIG_PREEMPT_RT */ /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPTION, * but a general need for low latency) */ static inline int spin_needbreak(spinlock_t *lock) { if (!preempt_model_preemptible()) return 0; return spin_is_contended(lock); } /* * Check if a rwlock is contended. * Returns non-zero if there is another task waiting on the rwlock. * Returns zero if the lock is not contended or the system / underlying * rwlock implementation does not support contention detection. * Technically does not depend on CONFIG_PREEMPTION, but a general need * for low latency. */ static inline int rwlock_needbreak(rwlock_t *lock) { if (!preempt_model_preemptible()) return 0; return rwlock_is_contended(lock); } /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) */ #include <linux/atomic.h> /** * atomic_dec_and_lock - lock on reaching reference count zero * @atomic: the atomic counter * @lock: the spinlock in question * * Decrements @atomic by 1. If the result is 0, returns true and locks * @lock. Returns false for all other cases. */ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, unsigned long *flags); #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock); #define atomic_dec_and_raw_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock)) extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock, unsigned long *flags); #define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \ __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))) int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp, const char *name, struct lock_class_key *key); #define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \ ({ \ static struct lock_class_key key; \ int ret; \ \ ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \ cpu_mult, gfp, #locks, &key); \ ret; \ }) void free_bucket_spinlocks(spinlock_t *locks); DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t, raw_spin_lock(_T->lock), raw_spin_unlock(_T->lock)) DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t, raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING), raw_spin_unlock(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, raw_spin_lock_irq(_T->lock), raw_spin_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, raw_spin_lock_irqsave(_T->lock, _T->flags), raw_spin_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try, 
raw_spin_trylock_irqsave(_T->lock, _T->flags)) DEFINE_LOCK_GUARD_1(spinlock, spinlock_t, spin_lock(_T->lock), spin_unlock(_T->lock)) DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock)) DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, spin_lock_irq(_T->lock), spin_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, spin_trylock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, spin_lock_irqsave(_T->lock, _T->flags), spin_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try, spin_trylock_irqsave(_T->lock, _T->flags)) DEFINE_LOCK_GUARD_1(read_lock, rwlock_t, read_lock(_T->lock), read_unlock(_T->lock)) DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t, read_lock_irq(_T->lock), read_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t, read_lock_irqsave(_T->lock, _T->flags), read_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) DEFINE_LOCK_GUARD_1(write_lock, rwlock_t, write_lock(_T->lock), write_unlock(_T->lock)) DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t, write_lock_irq(_T->lock), write_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t, write_lock_irqsave(_T->lock, _T->flags), write_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) #undef __LINUX_INSIDE_SPINLOCK_H #endif /* __LINUX_SPINLOCK_H */
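/*
 * Illustrative usage sketch, not part of spinlock.h itself: a driver-style
 * fragment contrasting the explicit spin_lock_irqsave()/spin_unlock_irqrestore()
 * critical section with the scope-based guard() form that the
 * DEFINE_LOCK_GUARD_1() definitions above enable.  The lock, counter and
 * function names are invented for the example.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_events;

/* Explicit form: usable from any context, interrupts restored on exit. */
static void example_count_event(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_events++;
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Guard form: the lock is dropped automatically when the scope ends,
 * removing the unlock-on-every-return-path boilerplate. */
static unsigned long example_read_events(void)
{
	guard(spinlock_irqsave)(&example_lock);
	return example_events;
}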
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for AzureWave 6007 DVB-C/T USB2.0 and clones * * Copyright (c) Henry Wang <Henry.wang@AzureWave.com> * * This driver was made publicly available by Terratec, at: * http://linux.terratec.de/files/TERRATEC_H7/20110323_TERRATEC_H7_Linux.tar.gz * The original driver's license is GPL, as declared with MODULE_LICENSE() * * Copyright (c) 2010-2012 Mauro Carvalho Chehab * Driver modified by in order to work with upstream drxk driver, and * tons of bugs got fixed, and converted to use dvb-usb-v2. */ #include "drxk.h" #include "mt2063.h" #include <media/dvb_ca_en50221.h> #include "dvb_usb.h" #include "cypress_firmware.h" #define AZ6007_FIRMWARE "dvb-usb-terratec-h7-az6007.fw" static int az6007_xfer_debug; module_param_named(xfer_debug, az6007_xfer_debug, int, 0644); MODULE_PARM_DESC(xfer_debug, "Enable xfer debug"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* Known requests (Cypress FX2 firmware + az6007 "private" ones*/ #define FX2_OED 0xb5 #define AZ6007_READ_DATA 0xb7 #define AZ6007_I2C_RD 0xb9 #define AZ6007_POWER 0xbc #define AZ6007_I2C_WR 0xbd #define FX2_SCON1 0xc0 #define AZ6007_TS_THROUGH 0xc7 #define AZ6007_READ_IR 0xb4 struct az6007_device_state { struct mutex mutex; struct mutex ca_mutex; struct dvb_ca_en50221 ca; unsigned warm:1; int (*gate_ctrl) (struct dvb_frontend *, int); unsigned char data[4096]; }; static struct drxk_config terratec_h7_drxk = { .adr = 0x29, .parallel_ts = true, .dynamic_clk = true, .single_master = true, .enable_merr_cfg = true, .no_i2c_bridge = false, .chunk_size = 64, .mpeg_out_clk_strength = 0x02, .qam_demod_parameter_count = 2, .microcode_name = "dvb-usb-terratec-h7-drxk.fw", }; static struct drxk_config cablestar_hdci_drxk = { .adr = 0x29, .parallel_ts = true, .dynamic_clk = true, .single_master = true, .enable_merr_cfg = true, .no_i2c_bridge = false, .chunk_size = 64, .mpeg_out_clk_strength = 0x02, .qam_demod_parameter_count = 2, .microcode_name = "dvb-usb-technisat-cablestar-hdci-drxk.fw", }; static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable) { struct az6007_device_state *st = fe_to_priv(fe); struct dvb_usb_adapter *adap = fe->sec_priv; int status = 0; pr_debug("%s: %s\n", __func__, enable ? "enable" : "disable"); if (!adap || !st) return -EINVAL; if (enable) status = st->gate_ctrl(fe, 1); else status = st->gate_ctrl(fe, 0); return status; } static struct mt2063_config az6007_mt2063_config = { .tuner_address = 0x60, .refclock = 36125000, }; static int __az6007_read(struct usb_device *udev, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req, USB_TYPE_VENDOR | USB_DIR_IN, value, index, b, blen, 5000); if (ret < 0) { pr_warn("usb read operation failed.
(%d)\n", ret); return -EIO; } if (az6007_xfer_debug) { printk(KERN_DEBUG "az6007: IN req: %02x, value: %04x, index: %04x\n", req, value, index); print_hex_dump_bytes("az6007: payload: ", DUMP_PREFIX_NONE, b, blen); } return ret; } static int az6007_read(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct az6007_device_state *st = d->priv; int ret; if (mutex_lock_interruptible(&st->mutex) < 0) return -EAGAIN; ret = __az6007_read(d->udev, req, value, index, b, blen); mutex_unlock(&st->mutex); return ret; } static int __az6007_write(struct usb_device *udev, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; if (az6007_xfer_debug) { printk(KERN_DEBUG "az6007: OUT req: %02x, value: %04x, index: %04x\n", req, value, index); print_hex_dump_bytes("az6007: payload: ", DUMP_PREFIX_NONE, b, blen); } if (blen > 64) { pr_err("az6007: tried to write %d bytes, but I2C max size is 64 bytes\n", blen); return -EOPNOTSUPP; } ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, b, blen, 5000); if (ret != blen) { pr_err("usb write operation failed. (%d)\n", ret); return -EIO; } return 0; } static int az6007_write(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct az6007_device_state *st = d->priv; int ret; if (mutex_lock_interruptible(&st->mutex) < 0) return -EAGAIN; ret = __az6007_write(d->udev, req, value, index, b, blen); mutex_unlock(&st->mutex); return ret; } static int az6007_streaming_ctrl(struct dvb_frontend *fe, int onoff) { struct dvb_usb_device *d = fe_to_d(fe); pr_debug("%s: %s\n", __func__, onoff ? "enable" : "disable"); return az6007_write(d, 0xbc, onoff, 0, NULL, 0); } #if IS_ENABLED(CONFIG_RC_CORE) /* remote control stuff (does not work with my box) */ static int az6007_rc_query(struct dvb_usb_device *d) { struct az6007_device_state *st = d_to_priv(d); unsigned code; enum rc_proto proto; if (az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10) < 0) return -EIO; if (st->data[1] == 0x44) return 0; if ((st->data[3] ^ st->data[4]) == 0xff) { if ((st->data[1] ^ st->data[2]) == 0xff) { code = RC_SCANCODE_NEC(st->data[1], st->data[3]); proto = RC_PROTO_NEC; } else { code = RC_SCANCODE_NECX(st->data[1] << 8 | st->data[2], st->data[3]); proto = RC_PROTO_NECX; } } else { code = RC_SCANCODE_NEC32(st->data[1] << 24 | st->data[2] << 16 | st->data[3] << 8 | st->data[4]); proto = RC_PROTO_NEC32; } rc_keydown(d->rc_dev, proto, code, st->data[5]); return 0; } static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { pr_debug("Getting az6007 Remote Control properties\n"); rc->allowed_protos = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32; rc->query = az6007_rc_query; rc->interval = 400; return 0; } #else #define az6007_get_rc_config NULL #endif static int az6007_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC1; value = address; index = 0; blen = 1; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. 
(%d)\n", ret); ret = -EINVAL; } else { ret = b[0]; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6007_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value1; u16 index; int blen; pr_debug("%s(), slot %d\n", __func__, slot); if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC2; value1 = address; index = value; blen = 0; ret = az6007_write(d, req, value1, index, NULL, blen); if (ret != 0) pr_warn("usb out operation failed. (%d)\n", ret); mutex_unlock(&state->ca_mutex); return ret; } static int az6007_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC3; value = address; index = 0; blen = 2; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. (%d)\n", ret); ret = -EINVAL; } else { if (b[0] == 0) pr_warn("Read CI IO error\n"); ret = b[1]; pr_debug("read cam data = %x from 0x%x\n", b[1], value); } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6007_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value1; u16 index; int blen; if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC4; value1 = address; index = value; blen = 0; ret = az6007_write(d, req, value1, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. (%d)\n", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; req = 0xC8; value = 0; index = 0; blen = 1; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. (%d)\n", ret); ret = -EIO; } else{ ret = b[0]; } kfree(b); return ret; } static int az6007_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret, i; u8 req; u16 value; u16 index; int blen; mutex_lock(&state->ca_mutex); req = 0xC6; value = 1; index = 0; blen = 0; ret = az6007_write(d, req, value, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. (%d)\n", ret); goto failed; } msleep(500); req = 0xC6; value = 0; index = 0; blen = 0; ret = az6007_write(d, req, value, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. 
(%d)\n", ret); goto failed; } for (i = 0; i < 15; i++) { msleep(100); if (CI_CamReady(ca, slot)) { pr_debug("CAM Ready\n"); break; } } msleep(5000); failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6007_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return 0; } static int az6007_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; pr_debug("%s()\n", __func__); mutex_lock(&state->ca_mutex); req = 0xC7; value = 1; index = 0; blen = 0; ret = az6007_write(d, req, value, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. (%d)\n", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6007_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC5; value = 0; index = 0; blen = 1; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. (%d)\n", ret); ret = -EIO; } else ret = 0; if (!ret && b[0] == 1) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static void az6007_ci_uninit(struct dvb_usb_device *d) { struct az6007_device_state *state; pr_debug("%s()\n", __func__); if (NULL == d) return; state = d_to_priv(d); if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int az6007_ci_init(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); struct az6007_device_state *state = adap_to_priv(adap); int ret; pr_debug("%s()\n", __func__); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = az6007_ci_read_attribute_mem; state->ca.write_attribute_mem = az6007_ci_write_attribute_mem; state->ca.read_cam_control = az6007_ci_read_cam_control; state->ca.write_cam_control = az6007_ci_write_cam_control; state->ca.slot_reset = az6007_ci_slot_reset; state->ca.slot_shutdown = az6007_ci_slot_shutdown; state->ca.slot_ts_enable = az6007_ci_slot_ts_enable; state->ca.poll_slot_status = az6007_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&adap->dvb_adap, &state->ca, 0, /* flags */ 1);/* n_slots */ if (ret != 0) { pr_err("Cannot initialize CI: Error %d.\n", ret); memset(&state->ca, 0, sizeof(state->ca)); return ret; } pr_debug("CI initialized.\n"); return 0; } static int az6007_read_mac_addr(struct dvb_usb_adapter *adap, u8 mac[6]) { struct dvb_usb_device *d = adap_to_d(adap); struct az6007_device_state *st = adap_to_priv(adap); int ret; ret = az6007_read(d, AZ6007_READ_DATA, 6, 0, st->data, 6); memcpy(mac, st->data, 6); if (ret > 0) pr_debug("%s: mac is %pM\n", __func__, mac); return ret; } static int az6007_frontend_attach(struct dvb_usb_adapter *adap) { struct az6007_device_state *st = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); pr_debug("attaching demod drxk\n"); adap->fe[0] = dvb_attach(drxk_attach, &terratec_h7_drxk, &d->i2c_adap); if (!adap->fe[0]) return -EINVAL; adap->fe[0]->sec_priv = adap; st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl; adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl; az6007_ci_init(adap); return 0; } static int 
az6007_cablestar_hdci_frontend_attach(struct dvb_usb_adapter *adap) { struct az6007_device_state *st = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); pr_debug("attaching demod drxk\n"); adap->fe[0] = dvb_attach(drxk_attach, &cablestar_hdci_drxk, &d->i2c_adap); if (!adap->fe[0]) return -EINVAL; adap->fe[0]->sec_priv = adap; st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl; adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl; az6007_ci_init(adap); return 0; } static int az6007_tuner_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); pr_debug("attaching tuner mt2063\n"); /* Attach mt2063 to DVB-C frontend */ if (adap->fe[0]->ops.i2c_gate_ctrl) adap->fe[0]->ops.i2c_gate_ctrl(adap->fe[0], 1); if (!dvb_attach(mt2063_attach, adap->fe[0], &az6007_mt2063_config, &d->i2c_adap)) return -EINVAL; if (adap->fe[0]->ops.i2c_gate_ctrl) adap->fe[0]->ops.i2c_gate_ctrl(adap->fe[0], 0); return 0; } static int az6007_power_ctrl(struct dvb_usb_device *d, int onoff) { struct az6007_device_state *state = d_to_priv(d); int ret; pr_debug("%s()\n", __func__); if (!state->warm) { mutex_init(&state->mutex); ret = az6007_write(d, AZ6007_POWER, 0, 2, NULL, 0); if (ret < 0) return ret; msleep(60); ret = az6007_write(d, AZ6007_POWER, 1, 4, NULL, 0); if (ret < 0) return ret; msleep(100); ret = az6007_write(d, AZ6007_POWER, 1, 3, NULL, 0); if (ret < 0) return ret; msleep(20); ret = az6007_write(d, AZ6007_POWER, 1, 4, NULL, 0); if (ret < 0) return ret; msleep(400); ret = az6007_write(d, FX2_SCON1, 0, 3, NULL, 0); if (ret < 0) return ret; msleep(150); ret = az6007_write(d, FX2_SCON1, 1, 3, NULL, 0); if (ret < 0) return ret; msleep(430); ret = az6007_write(d, AZ6007_POWER, 0, 0, NULL, 0); if (ret < 0) return ret; state->warm = true; return 0; } if (!onoff) return 0; az6007_write(d, AZ6007_POWER, 0, 0, NULL, 0); az6007_write(d, AZ6007_TS_THROUGH, 0, 0, NULL, 0); return 0; } /* I2C */ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct az6007_device_state *st = d_to_priv(d); int i, j, len; int ret = 0; u16 index; u16 value; int length; u8 req, addr; if (mutex_lock_interruptible(&st->mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { addr = msgs[i].addr << 1; if (((i + 1) < num) && (msgs[i].len == 1) && ((msgs[i].flags & I2C_M_RD) != I2C_M_RD) && (msgs[i + 1].flags & I2C_M_RD) && (msgs[i].addr == msgs[i + 1].addr)) { /* * A write + read xfer for the same address, where * the first xfer has just 1 byte length. 
* Need to join both into one operation */ if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C W/R addr=0x%x len=%d/%d\n", addr, msgs[i].len, msgs[i + 1].len); req = AZ6007_I2C_RD; index = msgs[i].buf[0]; value = addr | (1 << 8); length = 6 + msgs[i + 1].len; len = msgs[i + 1].len; ret = __az6007_read(d->udev, req, value, index, st->data, length); if (ret >= len) { for (j = 0; j < len; j++) msgs[i + 1].buf[j] = st->data[j + 5]; } else ret = -EIO; i++; } else if (!(msgs[i].flags & I2C_M_RD)) { /* write bytes */ if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n", addr, msgs[i].len); if (msgs[i].len < 1) { ret = -EIO; goto err; } req = AZ6007_I2C_WR; index = msgs[i].buf[0]; value = addr | (1 << 8); length = msgs[i].len - 1; len = msgs[i].len - 1; for (j = 0; j < len; j++) st->data[j] = msgs[i].buf[j + 1]; ret = __az6007_write(d->udev, req, value, index, st->data, length); } else { /* read bytes */ if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n", addr, msgs[i].len); if (msgs[i].len < 1) { ret = -EIO; goto err; } req = AZ6007_I2C_RD; index = msgs[i].buf[0]; value = addr; length = msgs[i].len + 6; len = msgs[i].len; ret = __az6007_read(d->udev, req, value, index, st->data, length); for (j = 0; j < len; j++) msgs[i].buf[j] = st->data[j + 5]; } if (ret < 0) goto err; } err: mutex_unlock(&st->mutex); if (ret < 0) { pr_info("%s ERROR: %i\n", __func__, ret); return ret; } return num; } static u32 az6007_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm az6007_i2c_algo = { .master_xfer = az6007_i2c_xfer, .functionality = az6007_i2c_func, }; static int az6007_identify_state(struct dvb_usb_device *d, const char **name) { int ret; u8 *mac; pr_debug("Identifying az6007 state\n"); mac = kmalloc(6, GFP_ATOMIC); if (!mac) return -ENOMEM; /* Try to read the mac address */ ret = __az6007_read(d->udev, AZ6007_READ_DATA, 6, 0, mac, 6); if (ret == 6) ret = WARM; else ret = COLD; kfree(mac); if (ret == COLD) { __az6007_write(d->udev, 0x09, 1, 0, NULL, 0); __az6007_write(d->udev, 0x00, 0, 0, NULL, 0); __az6007_write(d->udev, 0x00, 0, 0, NULL, 0); } pr_debug("Device is on %s state\n", ret == WARM ? 
"warm" : "cold"); return ret; } static void az6007_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); az6007_ci_uninit(d); dvb_usbv2_disconnect(intf); } static int az6007_download_firmware(struct dvb_usb_device *d, const struct firmware *fw) { pr_debug("Loading az6007 firmware\n"); return cypress_load_firmware(d->udev, fw, CYPRESS_FX2); } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties az6007_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .firmware = AZ6007_FIRMWARE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct az6007_device_state), .i2c_algo = &az6007_i2c_algo, .tuner_attach = az6007_tuner_attach, .frontend_attach = az6007_frontend_attach, .streaming_ctrl = az6007_streaming_ctrl, .get_rc_config = az6007_get_rc_config, .read_mac_address = az6007_read_mac_addr, .download_firmware = az6007_download_firmware, .identify_state = az6007_identify_state, .power_ctrl = az6007_power_ctrl, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), } } }; static struct dvb_usb_device_properties az6007_cablestar_hdci_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .firmware = AZ6007_FIRMWARE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct az6007_device_state), .i2c_algo = &az6007_i2c_algo, .tuner_attach = az6007_tuner_attach, .frontend_attach = az6007_cablestar_hdci_frontend_attach, .streaming_ctrl = az6007_streaming_ctrl, /* ditch get_rc_config as it can't work (TS35 remote, I believe it's rc5) */ .get_rc_config = NULL, .read_mac_address = az6007_read_mac_addr, .download_firmware = az6007_download_firmware, .identify_state = az6007_identify_state, .power_ctrl = az6007_power_ctrl, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), } } }; static const struct usb_device_id az6007_usb_table[] = { {DVB_USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_6007, &az6007_props, "Azurewave 6007", RC_MAP_EMPTY)}, {DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7, &az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)}, {DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7_2, &az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)}, {DVB_USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI, &az6007_cablestar_hdci_props, "Technisat CableStar Combo HD CI", RC_MAP_EMPTY)}, {0}, }; MODULE_DEVICE_TABLE(usb, az6007_usb_table); static int az6007_suspend(struct usb_interface *intf, pm_message_t msg) { struct dvb_usb_device *d = usb_get_intfdata(intf); az6007_ci_uninit(d); return dvb_usbv2_suspend(intf, msg); } static int az6007_resume(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); struct dvb_usb_adapter *adap = &d->adapter[0]; az6007_ci_init(adap); return dvb_usbv2_resume(intf); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver az6007_usb_driver = { .name = KBUILD_MODNAME, .id_table = az6007_usb_table, .probe = dvb_usbv2_probe, .disconnect = az6007_usb_disconnect, .no_dynamic_id = 1, .soft_unbind = 1, /* * FIXME: need to implement reset_resume, likely with * dvb-usb-v2 core support */ .suspend = az6007_suspend, .resume = az6007_resume, }; module_usb_driver(az6007_usb_driver); MODULE_AUTHOR("Henry Wang <Henry.wang@AzureWave.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_DESCRIPTION("Driver for AzureWave 6007 DVB-C/T USB2.0 and clones"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(AZ6007_FIRMWARE);
// SPDX-License-Identifier: GPL-2.0-only /* * Line 6 Linux USB driver * * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at) * Emil Myhrman (emil.myhrman@gmail.com) */ #include <linux/wait.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/leds.h> #include <sound/core.h> #include <sound/control.h> #include "capture.h" #include "driver.h" #include "playback.h" enum line6_device_type { LINE6_GUITARPORT, LINE6_PODSTUDIO_GX, LINE6_PODSTUDIO_UX1, LINE6_PODSTUDIO_UX2, LINE6_TONEPORT_GX, LINE6_TONEPORT_UX1, LINE6_TONEPORT_UX2, }; struct usb_line6_toneport; struct toneport_led { struct led_classdev dev; char name[64]; struct usb_line6_toneport *toneport; bool registered; }; struct usb_line6_toneport { /* Generic Line 6 USB data */ struct usb_line6 line6; /* Source selector */ int source; /* Serial number of device */ u32 serial_number; /* Firmware version (x 100) */ u8 firmware_version; /* Device type */ enum line6_device_type type; /* LED instances */ struct toneport_led leds[2]; }; #define line6_to_toneport(x) container_of(x, struct usb_line6_toneport, line6) static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2); #define TONEPORT_PCM_DELAY 1 static const struct snd_ratden toneport_ratden = { .num_min = 44100, .num_max = 44100,
.num_step = 1, .den = 1 }; static struct line6_pcm_properties toneport_pcm_properties = { .playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_KNOT, .rate_min = 44100, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 60000, .period_bytes_min = 64, .period_bytes_max = 8192, .periods_min = 1, .periods_max = 1024}, .capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_KNOT, .rate_min = 44100, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 60000, .period_bytes_min = 64, .period_bytes_max = 8192, .periods_min = 1, .periods_max = 1024}, .rates = { .nrats = 1, .rats = &toneport_ratden}, .bytes_per_channel = 2 }; static const struct { const char *name; int code; } toneport_source_info[] = { {"Microphone", 0x0a01}, {"Line", 0x0801}, {"Instrument", 0x0b01}, {"Inst & Mic", 0x0901} }; static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2) { int ret; ret = usb_control_msg_send(usbdev, 0, 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, cmd1, cmd2, NULL, 0, LINE6_TIMEOUT, GFP_KERNEL); if (ret) { dev_err(&usbdev->dev, "send failed (error %d)\n", ret); return ret; } return 0; } /* monitor info callback */ static int snd_toneport_monitor_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 256; return 0; } /* monitor get callback */ static int snd_toneport_monitor_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = line6pcm->volume_monitor; return 0; } /* monitor put callback */ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); int err; if (ucontrol->value.integer.value[0] == line6pcm->volume_monitor) return 0; line6pcm->volume_monitor = ucontrol->value.integer.value[0]; if (line6pcm->volume_monitor > 0) { err = line6_pcm_acquire(line6pcm, LINE6_STREAM_MONITOR, true); if (err < 0) { line6pcm->volume_monitor = 0; line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR); return err; } } else { line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR); } return 1; } /* source info callback */ static int snd_toneport_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { const int size = ARRAY_SIZE(toneport_source_info); uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = size; if (uinfo->value.enumerated.item >= size) uinfo->value.enumerated.item = size - 1; strcpy(uinfo->value.enumerated.name, toneport_source_info[uinfo->value.enumerated.item].name); return 0; } /* source get callback */ static int snd_toneport_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); struct usb_line6_toneport *toneport = line6_to_toneport(line6pcm->line6); ucontrol->value.enumerated.item[0] = toneport->source; return 0; } /* source put callback */ static int 
snd_toneport_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); struct usb_line6_toneport *toneport = line6_to_toneport(line6pcm->line6); unsigned int source; source = ucontrol->value.enumerated.item[0]; if (source >= ARRAY_SIZE(toneport_source_info)) return -EINVAL; if (source == toneport->source) return 0; toneport->source = source; toneport_send_cmd(toneport->line6.usbdev, toneport_source_info[source].code, 0x0000); return 1; } static void toneport_startup(struct usb_line6 *line6) { line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true); } /* control definition */ static const struct snd_kcontrol_new toneport_control_monitor = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Monitor Playback Volume", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = snd_toneport_monitor_info, .get = snd_toneport_monitor_get, .put = snd_toneport_monitor_put }; /* source selector definition */ static const struct snd_kcontrol_new toneport_control_source = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Capture Source", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = snd_toneport_source_info, .get = snd_toneport_source_get, .put = snd_toneport_source_put }; /* For the led on Guitarport. Brightness goes from 0x00 to 0x26. Set a value above this to have led blink. (void cmd_0x02(byte red, byte green) */ static bool toneport_has_led(struct usb_line6_toneport *toneport) { switch (toneport->type) { case LINE6_GUITARPORT: case LINE6_TONEPORT_GX: /* add your device here if you are missing support for the LEDs */ return true; default: return false; } } static const char * const toneport_led_colors[2] = { "red", "green" }; static const int toneport_led_init_vals[2] = { 0x00, 0x26 }; static void toneport_update_led(struct usb_line6_toneport *toneport) { toneport_send_cmd(toneport->line6.usbdev, (toneport->leds[0].dev.brightness << 8) | 0x0002, toneport->leds[1].dev.brightness); } static void toneport_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct toneport_led *leds = container_of(led_cdev, struct toneport_led, dev); toneport_update_led(leds->toneport); } static int toneport_init_leds(struct usb_line6_toneport *toneport) { struct device *dev = &toneport->line6.usbdev->dev; int i, err; for (i = 0; i < 2; i++) { struct toneport_led *led = &toneport->leds[i]; struct led_classdev *leddev = &led->dev; led->toneport = toneport; snprintf(led->name, sizeof(led->name), "%s::%s", dev_name(dev), toneport_led_colors[i]); leddev->name = led->name; leddev->brightness = toneport_led_init_vals[i]; leddev->max_brightness = 0x26; leddev->brightness_set = toneport_led_brightness_set; err = led_classdev_register(dev, leddev); if (err) return err; led->registered = true; } return 0; } static void toneport_remove_leds(struct usb_line6_toneport *toneport) { struct toneport_led *led; int i; for (i = 0; i < 2; i++) { led = &toneport->leds[i]; if (!led->registered) break; led_classdev_unregister(&led->dev); led->registered = false; } } static bool toneport_has_source_select(struct usb_line6_toneport *toneport) { switch (toneport->type) { case LINE6_TONEPORT_UX1: case LINE6_TONEPORT_UX2: case LINE6_PODSTUDIO_UX1: case LINE6_PODSTUDIO_UX2: return true; default: return false; } } /* Setup Toneport device. 
*/ static int toneport_setup(struct usb_line6_toneport *toneport) { u32 *ticks; struct usb_line6 *line6 = &toneport->line6; struct usb_device *usbdev = line6->usbdev; ticks = kmalloc(sizeof(*ticks), GFP_KERNEL); if (!ticks) return -ENOMEM; /* sync time on device with host: */ /* note: 32-bit timestamps overflow in year 2106 */ *ticks = (u32)ktime_get_real_seconds(); line6_write_data(line6, 0x80c6, ticks, 4); kfree(ticks); /* enable device: */ toneport_send_cmd(usbdev, 0x0301, 0x0000); /* initialize source select: */ if (toneport_has_source_select(toneport)) toneport_send_cmd(usbdev, toneport_source_info[toneport->source].code, 0x0000); if (toneport_has_led(toneport)) toneport_update_led(toneport); schedule_delayed_work(&toneport->line6.startup_work, secs_to_jiffies(TONEPORT_PCM_DELAY)); return 0; } /* Toneport device disconnected. */ static void line6_toneport_disconnect(struct usb_line6 *line6) { struct usb_line6_toneport *toneport = line6_to_toneport(line6); if (toneport_has_led(toneport)) toneport_remove_leds(toneport); } /* Try to init Toneport device. */ static int toneport_init(struct usb_line6 *line6, const struct usb_device_id *id) { int err; struct usb_line6_toneport *toneport = line6_to_toneport(line6); toneport->type = id->driver_info; line6->disconnect = line6_toneport_disconnect; line6->startup = toneport_startup; /* initialize PCM subsystem: */ err = line6_init_pcm(line6, &toneport_pcm_properties); if (err < 0) return err; /* register monitor control: */ err = snd_ctl_add(line6->card, snd_ctl_new1(&toneport_control_monitor, line6->line6pcm)); if (err < 0) return err; /* register source select control: */ if (toneport_has_source_select(toneport)) { err = snd_ctl_add(line6->card, snd_ctl_new1(&toneport_control_source, line6->line6pcm)); if (err < 0) return err; } line6_read_serial_number(line6, &toneport->serial_number); line6_read_data(line6, 0x80c2, &toneport->firmware_version, 1); if (toneport_has_led(toneport)) { err = toneport_init_leds(toneport); if (err < 0) return err; } err = toneport_setup(toneport); if (err) return err; /* register audio system: */ return snd_card_register(line6->card); } #ifdef CONFIG_PM /* Resume Toneport device after reset. 
*/ static int toneport_reset_resume(struct usb_interface *interface) { int err; err = toneport_setup(usb_get_intfdata(interface)); if (err) return err; return line6_resume(interface); } #endif #define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod) #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) /* table of devices that work with this driver */ static const struct usb_device_id toneport_id_table[] = { { LINE6_DEVICE(0x4750), .driver_info = LINE6_GUITARPORT }, { LINE6_DEVICE(0x4153), .driver_info = LINE6_PODSTUDIO_GX }, { LINE6_DEVICE(0x4150), .driver_info = LINE6_PODSTUDIO_UX1 }, { LINE6_IF_NUM(0x4151, 0), .driver_info = LINE6_PODSTUDIO_UX2 }, { LINE6_DEVICE(0x4147), .driver_info = LINE6_TONEPORT_GX }, { LINE6_DEVICE(0x4141), .driver_info = LINE6_TONEPORT_UX1 }, { LINE6_IF_NUM(0x4142, 0), .driver_info = LINE6_TONEPORT_UX2 }, {} }; MODULE_DEVICE_TABLE(usb, toneport_id_table); static const struct line6_properties toneport_properties_table[] = { [LINE6_GUITARPORT] = { .id = "GuitarPort", .name = "GuitarPort", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* 1..4 seem to be ok */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_PODSTUDIO_GX] = { .id = "PODStudioGX", .name = "POD Studio GX", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* 1..4 seem to be ok */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_PODSTUDIO_UX1] = { .id = "PODStudioUX1", .name = "POD Studio UX1", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* 1..4 seem to be ok */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_PODSTUDIO_UX2] = { .id = "PODStudioUX2", .name = "POD Studio UX2", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* defaults to 44.1kHz, 16-bit */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_TONEPORT_GX] = { .id = "TonePortGX", .name = "TonePort GX", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* 1..4 seem to be ok */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_TONEPORT_UX1] = { .id = "TonePortUX1", .name = "TonePort UX1", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* 1..4 seem to be ok */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_TONEPORT_UX2] = { .id = "TonePortUX2", .name = "TonePort UX2", .capabilities = LINE6_CAP_PCM, .altsetting = 2, /* defaults to 44.1kHz, 16-bit */ /* no control channel */ .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, }; /* Probe USB device. */ static int toneport_probe(struct usb_interface *interface, const struct usb_device_id *id) { return line6_probe(interface, id, "Line6-TonePort", &toneport_properties_table[id->driver_info], toneport_init, sizeof(struct usb_line6_toneport)); } static struct usb_driver toneport_driver = { .name = KBUILD_MODNAME, .probe = toneport_probe, .disconnect = line6_disconnect, #ifdef CONFIG_PM .suspend = line6_suspend, .resume = line6_resume, .reset_resume = toneport_reset_resume, #endif .id_table = toneport_id_table, }; module_usb_driver(toneport_driver); MODULE_DESCRIPTION("TonePort USB driver"); MODULE_LICENSE("GPL");
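toneport_pcm_properties above advertises only SNDRV_PCM_RATE_KNOT, so the fixed 44.1 kHz rate really comes from the toneport_ratden table; the shared line6 PCM code applies that table when a substream is opened. The sketch below shows the generic ALSA call involved, assuming a hypothetical open callback name; it is not the driver's actual pcm.c code.

/*
 * Sketch only: wiring an snd_ratden table to SNDRV_PCM_HW_PARAM_RATE
 * in a PCM open callback.  example_pcm_open() is hypothetical.
 */
static int example_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->hw = toneport_pcm_properties.playback_hw;
	/* constrain the KNOT rate mask to the single 44100/1 ratio */
	return snd_pcm_hw_constraint_ratdens(runtime, 0,
					     SNDRV_PCM_HW_PARAM_RATE,
					     &toneport_pcm_properties.rates);
}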
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_PCM_H #define __SOUND_PCM_H /* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Abramo Bagnara <abramo@alsa-project.org> */ #include <sound/asound.h>
#include <sound/memalloc.h> #include <sound/minors.h> #include <linux/poll.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/pm_qos.h> #include <linux/refcount.h> #include <linux/uio.h> #define snd_pcm_substream_chip(substream) ((substream)->private_data) #define snd_pcm_chip(pcm) ((pcm)->private_data) #if IS_ENABLED(CONFIG_SND_PCM_OSS) #include <sound/pcm_oss.h> #endif /* * Hardware (lowlevel) section */ struct snd_pcm_hardware { unsigned int info; /* SNDRV_PCM_INFO_* */ u64 formats; /* SNDRV_PCM_FMTBIT_* */ u32 subformats; /* for S32_LE, SNDRV_PCM_SUBFMTBIT_* */ unsigned int rates; /* SNDRV_PCM_RATE_* */ unsigned int rate_min; /* min rate */ unsigned int rate_max; /* max rate */ unsigned int channels_min; /* min channels */ unsigned int channels_max; /* max channels */ size_t buffer_bytes_max; /* max buffer size */ size_t period_bytes_min; /* min period size */ size_t period_bytes_max; /* max period size */ unsigned int periods_min; /* min # of periods */ unsigned int periods_max; /* max # of periods */ size_t fifo_size; /* fifo size in bytes */ }; struct snd_pcm_status64; struct snd_pcm_substream; struct snd_pcm_audio_tstamp_config; /* definitions further down */ struct snd_pcm_audio_tstamp_report; struct snd_pcm_ops { int (*open)(struct snd_pcm_substream *substream); int (*close)(struct snd_pcm_substream *substream); int (*ioctl)(struct snd_pcm_substream * substream, unsigned int cmd, void *arg); int (*hw_params)(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); int (*hw_free)(struct snd_pcm_substream *substream); int (*prepare)(struct snd_pcm_substream *substream); int (*trigger)(struct snd_pcm_substream *substream, int cmd); int (*sync_stop)(struct snd_pcm_substream *substream); snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream); int (*get_time_info)(struct snd_pcm_substream *substream, struct timespec64 *system_ts, struct timespec64 *audio_ts, struct snd_pcm_audio_tstamp_config *audio_tstamp_config, struct snd_pcm_audio_tstamp_report *audio_tstamp_report); int (*fill_silence)(struct snd_pcm_substream *substream, int channel, unsigned long pos, unsigned long bytes); int (*copy)(struct snd_pcm_substream *substream, int channel, unsigned long pos, struct iov_iter *iter, unsigned long bytes); struct page *(*page)(struct snd_pcm_substream *substream, unsigned long offset); int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma); int (*ack)(struct snd_pcm_substream *substream); }; /* * */ #if defined(CONFIG_SND_DYNAMIC_MINORS) #define SNDRV_PCM_DEVICES (SNDRV_OS_MINORS-2) #else #define SNDRV_PCM_DEVICES 8 #endif #define SNDRV_PCM_IOCTL1_RESET 0 /* 1 is absent slot. */ #define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2 /* 3 is absent slot. 
*/ #define SNDRV_PCM_IOCTL1_FIFO_SIZE 4 #define SNDRV_PCM_IOCTL1_SYNC_ID 5 #define SNDRV_PCM_TRIGGER_STOP 0 #define SNDRV_PCM_TRIGGER_START 1 #define SNDRV_PCM_TRIGGER_PAUSE_PUSH 2 #define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 3 #define SNDRV_PCM_TRIGGER_SUSPEND 4 #define SNDRV_PCM_TRIGGER_RESUME 5 #define SNDRV_PCM_TRIGGER_DRAIN 6 #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1) /* If you change this don't forget to change rates[] table in pcm_native.c */ #define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */ #define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */ #define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */ #define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */ #define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */ #define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */ #define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */ #define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */ #define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */ #define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */ #define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */ #define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */ #define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */ #define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */ #define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */ #define SNDRV_PCM_RATE_705600 (1U<<15) /* 705600Hz */ #define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */ /* extended rates since 6.12 */ #define SNDRV_PCM_RATE_12000 (1U<<17) /* 12000Hz */ #define SNDRV_PCM_RATE_24000 (1U<<18) /* 24000Hz */ #define SNDRV_PCM_RATE_128000 (1U<<19) /* 128000Hz */ #define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */ #define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\ SNDRV_PCM_RATE_32000|SNDRV_PCM_RATE_44100) #define SNDRV_PCM_RATE_8000_48000 (SNDRV_PCM_RATE_8000_44100|SNDRV_PCM_RATE_48000) #define SNDRV_PCM_RATE_8000_96000 (SNDRV_PCM_RATE_8000_48000|SNDRV_PCM_RATE_64000|\ SNDRV_PCM_RATE_88200|SNDRV_PCM_RATE_96000) #define SNDRV_PCM_RATE_8000_192000 (SNDRV_PCM_RATE_8000_96000|SNDRV_PCM_RATE_176400|\ SNDRV_PCM_RATE_192000) #define SNDRV_PCM_RATE_8000_384000 (SNDRV_PCM_RATE_8000_192000|\ SNDRV_PCM_RATE_352800|\ SNDRV_PCM_RATE_384000) #define SNDRV_PCM_RATE_8000_768000 (SNDRV_PCM_RATE_8000_384000|\ SNDRV_PCM_RATE_705600|\ SNDRV_PCM_RATE_768000) #define _SNDRV_PCM_FMTBIT(fmt) (1ULL << (__force int)SNDRV_PCM_FORMAT_##fmt) #define SNDRV_PCM_FMTBIT_S8 _SNDRV_PCM_FMTBIT(S8) #define SNDRV_PCM_FMTBIT_U8 _SNDRV_PCM_FMTBIT(U8) #define SNDRV_PCM_FMTBIT_S16_LE _SNDRV_PCM_FMTBIT(S16_LE) #define SNDRV_PCM_FMTBIT_S16_BE _SNDRV_PCM_FMTBIT(S16_BE) #define SNDRV_PCM_FMTBIT_U16_LE _SNDRV_PCM_FMTBIT(U16_LE) #define SNDRV_PCM_FMTBIT_U16_BE _SNDRV_PCM_FMTBIT(U16_BE) #define SNDRV_PCM_FMTBIT_S24_LE _SNDRV_PCM_FMTBIT(S24_LE) #define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE) #define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE) #define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE) // For S32/U32 formats, 'msbits' hardware parameter is often used to deliver information about the // available bit count in most significant bit. It's for the case of so-called 'left-justified' or // `right-padding` sample which has less width than 32 bit. 
#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE) #define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE) #define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE) #define SNDRV_PCM_FMTBIT_U32_BE _SNDRV_PCM_FMTBIT(U32_BE) #define SNDRV_PCM_FMTBIT_FLOAT_LE _SNDRV_PCM_FMTBIT(FLOAT_LE) #define SNDRV_PCM_FMTBIT_FLOAT_BE _SNDRV_PCM_FMTBIT(FLOAT_BE) #define SNDRV_PCM_FMTBIT_FLOAT64_LE _SNDRV_PCM_FMTBIT(FLOAT64_LE) #define SNDRV_PCM_FMTBIT_FLOAT64_BE _SNDRV_PCM_FMTBIT(FLOAT64_BE) #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE _SNDRV_PCM_FMTBIT(IEC958_SUBFRAME_LE) #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE _SNDRV_PCM_FMTBIT(IEC958_SUBFRAME_BE) #define SNDRV_PCM_FMTBIT_MU_LAW _SNDRV_PCM_FMTBIT(MU_LAW) #define SNDRV_PCM_FMTBIT_A_LAW _SNDRV_PCM_FMTBIT(A_LAW) #define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM) #define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG) #define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM) #define SNDRV_PCM_FMTBIT_S20_LE _SNDRV_PCM_FMTBIT(S20_LE) #define SNDRV_PCM_FMTBIT_U20_LE _SNDRV_PCM_FMTBIT(U20_LE) #define SNDRV_PCM_FMTBIT_S20_BE _SNDRV_PCM_FMTBIT(S20_BE) #define SNDRV_PCM_FMTBIT_U20_BE _SNDRV_PCM_FMTBIT(U20_BE) #define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL) #define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE) #define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE) #define SNDRV_PCM_FMTBIT_S24_3BE _SNDRV_PCM_FMTBIT(S24_3BE) #define SNDRV_PCM_FMTBIT_U24_3BE _SNDRV_PCM_FMTBIT(U24_3BE) #define SNDRV_PCM_FMTBIT_S20_3LE _SNDRV_PCM_FMTBIT(S20_3LE) #define SNDRV_PCM_FMTBIT_U20_3LE _SNDRV_PCM_FMTBIT(U20_3LE) #define SNDRV_PCM_FMTBIT_S20_3BE _SNDRV_PCM_FMTBIT(S20_3BE) #define SNDRV_PCM_FMTBIT_U20_3BE _SNDRV_PCM_FMTBIT(U20_3BE) #define SNDRV_PCM_FMTBIT_S18_3LE _SNDRV_PCM_FMTBIT(S18_3LE) #define SNDRV_PCM_FMTBIT_U18_3LE _SNDRV_PCM_FMTBIT(U18_3LE) #define SNDRV_PCM_FMTBIT_S18_3BE _SNDRV_PCM_FMTBIT(S18_3BE) #define SNDRV_PCM_FMTBIT_U18_3BE _SNDRV_PCM_FMTBIT(U18_3BE) #define SNDRV_PCM_FMTBIT_G723_24 _SNDRV_PCM_FMTBIT(G723_24) #define SNDRV_PCM_FMTBIT_G723_24_1B _SNDRV_PCM_FMTBIT(G723_24_1B) #define SNDRV_PCM_FMTBIT_G723_40 _SNDRV_PCM_FMTBIT(G723_40) #define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B) #define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) #define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) #define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) #define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE) #define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE) #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE #define SNDRV_PCM_FMTBIT_U16 SNDRV_PCM_FMTBIT_U16_LE #define SNDRV_PCM_FMTBIT_S24 SNDRV_PCM_FMTBIT_S24_LE #define SNDRV_PCM_FMTBIT_U24 SNDRV_PCM_FMTBIT_U24_LE #define SNDRV_PCM_FMTBIT_S32 SNDRV_PCM_FMTBIT_S32_LE #define SNDRV_PCM_FMTBIT_U32 SNDRV_PCM_FMTBIT_U32_LE #define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_LE #define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_LE #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE #define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_LE #define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_LE #endif #ifdef SNDRV_BIG_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_BE #define SNDRV_PCM_FMTBIT_U16 SNDRV_PCM_FMTBIT_U16_BE #define SNDRV_PCM_FMTBIT_S24 SNDRV_PCM_FMTBIT_S24_BE #define SNDRV_PCM_FMTBIT_U24 SNDRV_PCM_FMTBIT_U24_BE #define SNDRV_PCM_FMTBIT_S32 SNDRV_PCM_FMTBIT_S32_BE #define SNDRV_PCM_FMTBIT_U32 SNDRV_PCM_FMTBIT_U32_BE #define SNDRV_PCM_FMTBIT_FLOAT 
SNDRV_PCM_FMTBIT_FLOAT_BE #define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_BE #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE #define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_BE #define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE #endif #define _SNDRV_PCM_SUBFMTBIT(fmt) BIT((__force int)SNDRV_PCM_SUBFORMAT_##fmt) #define SNDRV_PCM_SUBFMTBIT_STD _SNDRV_PCM_SUBFMTBIT(STD) #define SNDRV_PCM_SUBFMTBIT_MSBITS_MAX _SNDRV_PCM_SUBFMTBIT(MSBITS_MAX) #define SNDRV_PCM_SUBFMTBIT_MSBITS_20 _SNDRV_PCM_SUBFMTBIT(MSBITS_20) #define SNDRV_PCM_SUBFMTBIT_MSBITS_24 _SNDRV_PCM_SUBFMTBIT(MSBITS_24) struct snd_pcm_file { struct snd_pcm_substream *substream; int no_compat_mmap; unsigned int user_pversion; /* supported protocol version */ }; struct snd_pcm_hw_rule; typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule); struct snd_pcm_hw_rule { unsigned int cond; int var; int deps[5]; snd_pcm_hw_rule_func_t func; void *private; }; struct snd_pcm_hw_constraints { struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1]; unsigned int rules_num; unsigned int rules_all; struct snd_pcm_hw_rule *rules; }; static inline struct snd_mask *constrs_mask(struct snd_pcm_hw_constraints *constrs, snd_pcm_hw_param_t var) { return &constrs->masks[var - SNDRV_PCM_HW_PARAM_FIRST_MASK]; } static inline struct snd_interval *constrs_interval(struct snd_pcm_hw_constraints *constrs, snd_pcm_hw_param_t var) { return &constrs->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } struct snd_ratnum { unsigned int num; unsigned int den_min, den_max, den_step; }; struct snd_ratden { unsigned int num_min, num_max, num_step; unsigned int den; }; struct snd_pcm_hw_constraint_ratnums { int nrats; const struct snd_ratnum *rats; }; struct snd_pcm_hw_constraint_ratdens { int nrats; const struct snd_ratden *rats; }; struct snd_pcm_hw_constraint_list { const unsigned int *list; unsigned int count; unsigned int mask; }; struct snd_pcm_hw_constraint_ranges { unsigned int count; const struct snd_interval *ranges; unsigned int mask; }; /* * userspace-provided audio timestamp config to kernel, * structure is for internal use only and filled with dedicated unpack routine */ struct snd_pcm_audio_tstamp_config { /* 5 of max 16 bits used */ u32 type_requested:4; u32 report_delay:1; /* add total delay to A/D or D/A */ }; static inline void snd_pcm_unpack_audio_tstamp_config(__u32 data, struct snd_pcm_audio_tstamp_config *config) { config->type_requested = data & 0xF; config->report_delay = (data >> 4) & 1; } /* * kernel-provided audio timestamp report to user-space * structure is for internal use only and read by dedicated pack routine */ struct snd_pcm_audio_tstamp_report { /* 6 of max 16 bits used for bit-fields */ /* for backwards compatibility */ u32 valid:1; /* actual type if hardware could not support requested timestamp */ u32 actual_type:4; /* accuracy represented in ns units */ u32 accuracy_report:1; /* 0 if accuracy unknown, 1 if accuracy field is valid */ u32 accuracy; /* up to 4.29s, will be packed in separate field */ }; static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy, const struct snd_pcm_audio_tstamp_report *report) { u32 tmp; tmp = report->accuracy_report; tmp <<= 4; tmp |= report->actual_type; tmp <<= 1; tmp |= report->valid; *data &= 0xffff; /* zero-clear MSBs */ *data |= (tmp << 16); 
*accuracy = report->accuracy; } struct snd_pcm_runtime { /* -- Status -- */ snd_pcm_state_t state; /* stream state */ snd_pcm_state_t suspended_state; /* suspended stream state */ struct snd_pcm_substream *trigger_master; struct timespec64 trigger_tstamp; /* trigger timestamp */ bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */ int overrange; snd_pcm_uframes_t avail_max; snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */ unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */ unsigned long hw_ptr_buffer_jiffies; /* buffer time in jiffies */ snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */ u64 hw_ptr_wrap; /* offset for hw_ptr due to boundary wrap-around */ /* -- HW params -- */ snd_pcm_access_t access; /* access mode */ snd_pcm_format_t format; /* SNDRV_PCM_FORMAT_* */ snd_pcm_subformat_t subformat; /* subformat */ unsigned int rate; /* rate in Hz */ unsigned int channels; /* channels */ snd_pcm_uframes_t period_size; /* period size */ unsigned int periods; /* periods */ snd_pcm_uframes_t buffer_size; /* buffer size */ snd_pcm_uframes_t min_align; /* Min alignment for the format */ size_t byte_align; unsigned int frame_bits; unsigned int sample_bits; unsigned int info; unsigned int rate_num; unsigned int rate_den; unsigned int no_period_wakeup: 1; /* -- SW params; see struct snd_pcm_sw_params for comments -- */ int tstamp_mode; unsigned int period_step; snd_pcm_uframes_t start_threshold; snd_pcm_uframes_t stop_threshold; snd_pcm_uframes_t silence_threshold; snd_pcm_uframes_t silence_size; snd_pcm_uframes_t boundary; /* internal data of auto-silencer */ snd_pcm_uframes_t silence_start; /* starting pointer to silence area */ snd_pcm_uframes_t silence_filled; /* already filled part of silence area */ bool std_sync_id; /* hardware synchronization - standard per card ID */ /* -- mmap -- */ struct snd_pcm_mmap_status *status; struct snd_pcm_mmap_control *control; /* -- locking / scheduling -- */ snd_pcm_uframes_t twake; /* do transfer (!poll) wakeup if non-zero */ wait_queue_head_t sleep; /* poll sleep */ wait_queue_head_t tsleep; /* transfer sleep */ struct snd_fasync *fasync; bool stop_operating; /* sync_stop will be called */ struct mutex buffer_mutex; /* protect for buffer changes */ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */ /* -- private section -- */ void *private_data; void (*private_free)(struct snd_pcm_runtime *runtime); /* -- hardware description -- */ struct snd_pcm_hardware hw; struct snd_pcm_hw_constraints hw_constraints; /* -- timer -- */ unsigned int timer_resolution; /* timer resolution */ int tstamp_type; /* timestamp type */ /* -- DMA -- */ unsigned char *dma_area; /* DMA area */ dma_addr_t dma_addr; /* physical bus address (not accessible from main CPU) */ size_t dma_bytes; /* size of DMA area */ struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */ unsigned int buffer_changed:1; /* buffer allocation changed; set only in managed mode */ /* -- audio timestamp config -- */ struct snd_pcm_audio_tstamp_config audio_tstamp_config; struct snd_pcm_audio_tstamp_report audio_tstamp_report; struct timespec64 driver_tstamp; #if IS_ENABLED(CONFIG_SND_PCM_OSS) /* -- OSS things -- */ struct snd_pcm_oss_runtime oss; #endif }; struct snd_pcm_group { /* keep linked substreams */ spinlock_t lock; struct mutex mutex; struct list_head substreams; refcount_t refs; }; struct pid; struct snd_pcm_substream { struct snd_pcm 
*pcm; struct snd_pcm_str *pstr; void *private_data; /* copied from pcm->private_data */ int number; char name[32]; /* substream name */ int stream; /* stream (direction) */ struct pm_qos_request latency_pm_qos_req; /* pm_qos request */ size_t buffer_bytes_max; /* limit ring buffer size */ struct snd_dma_buffer dma_buffer; size_t dma_max; /* -- hardware operations -- */ const struct snd_pcm_ops *ops; /* -- runtime information -- */ struct snd_pcm_runtime *runtime; /* -- timer section -- */ struct snd_timer *timer; /* timer */ unsigned timer_running: 1; /* time is running */ long wait_time; /* time in ms for R/W to wait for avail */ /* -- next substream -- */ struct snd_pcm_substream *next; /* -- linked substreams -- */ struct list_head link_list; /* linked list member */ struct snd_pcm_group self_group; /* fake group for non linked substream (with substream lock inside) */ struct snd_pcm_group *group; /* pointer to current group */ /* -- assigned files -- */ int ref_count; atomic_t mmap_count; unsigned int f_flags; void (*pcm_release)(struct snd_pcm_substream *); struct pid *pid; #if IS_ENABLED(CONFIG_SND_PCM_OSS) /* -- OSS things -- */ struct snd_pcm_oss_substream oss; #endif #ifdef CONFIG_SND_VERBOSE_PROCFS struct snd_info_entry *proc_root; #endif /* CONFIG_SND_VERBOSE_PROCFS */ /* misc flags */ unsigned int hw_opened: 1; unsigned int managed_buffer_alloc:1; #ifdef CONFIG_SND_PCM_XRUN_DEBUG unsigned int xrun_counter; /* number of times xrun happens */ #endif /* CONFIG_SND_PCM_XRUN_DEBUG */ }; #define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0) struct snd_pcm_str { int stream; /* stream (direction) */ struct snd_pcm *pcm; /* -- substreams -- */ unsigned int substream_count; unsigned int substream_opened; struct snd_pcm_substream *substream; #if IS_ENABLED(CONFIG_SND_PCM_OSS) /* -- OSS things -- */ struct snd_pcm_oss_stream oss; #endif #ifdef CONFIG_SND_VERBOSE_PROCFS struct snd_info_entry *proc_root; #ifdef CONFIG_SND_PCM_XRUN_DEBUG unsigned int xrun_debug; /* 0 = disabled, 1 = verbose, 2 = stacktrace */ #endif #endif struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */ struct device *dev; }; struct snd_pcm { struct snd_card *card; struct list_head list; int device; /* device number */ unsigned int info_flags; unsigned short dev_class; unsigned short dev_subclass; char id[64]; char name[80]; struct snd_pcm_str streams[2]; struct mutex open_mutex; wait_queue_head_t open_wait; void *private_data; void (*private_free) (struct snd_pcm *pcm); bool internal; /* pcm is for internal use only */ bool nonatomic; /* whole PCM operations are in non-atomic context */ bool no_device_suspend; /* don't invoke device PM suspend */ #if IS_ENABLED(CONFIG_SND_PCM_OSS) struct snd_pcm_oss oss; #endif }; /* * Registering */ extern const struct file_operations snd_pcm_f_ops[2]; int snd_pcm_new(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, struct snd_pcm **rpcm); int snd_pcm_new_internal(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, struct snd_pcm **rpcm); int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count); #if IS_ENABLED(CONFIG_SND_PCM_OSS) struct snd_pcm_notify { int (*n_register) (struct snd_pcm * pcm); int (*n_disconnect) (struct snd_pcm * pcm); int (*n_unregister) (struct snd_pcm * pcm); struct list_head list; }; int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree); #endif /* * Native I/O */ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info 
*info); int snd_pcm_info_user(struct snd_pcm_substream *substream, struct snd_pcm_info __user *info); int snd_pcm_status64(struct snd_pcm_substream *substream, struct snd_pcm_status64 *status); int snd_pcm_start(struct snd_pcm_substream *substream); int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status); int snd_pcm_drain_done(struct snd_pcm_substream *substream); int snd_pcm_stop_xrun(struct snd_pcm_substream *substream); #ifdef CONFIG_PM int snd_pcm_suspend_all(struct snd_pcm *pcm); #else static inline int snd_pcm_suspend_all(struct snd_pcm *pcm) { return 0; } #endif int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct file *file, struct snd_pcm_substream **rsubstream); void snd_pcm_release_substream(struct snd_pcm_substream *substream); int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, struct file *file, struct snd_pcm_substream **rsubstream); void snd_pcm_detach_substream(struct snd_pcm_substream *substream); int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); #ifdef CONFIG_SND_DEBUG void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *name, size_t len); #else static inline void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size) { *buf = 0; } #endif /* * PCM library */ /** * snd_pcm_stream_linked - Check whether the substream is linked with others * @substream: substream to check * * Return: true if the given substream is being linked with others */ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream) { return substream->group != &substream->self_group; } void snd_pcm_stream_lock(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock(struct snd_pcm_substream *substream); void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream); /** * snd_pcm_stream_lock_irqsave - Lock the PCM stream * @substream: PCM substream * @flags: irq flags * * This locks the PCM stream like snd_pcm_stream_lock() but with the local * IRQ (only when nonatomic is false). In nonatomic case, this is identical * as snd_pcm_stream_lock(). */ #define snd_pcm_stream_lock_irqsave(substream, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _snd_pcm_stream_lock_irqsave(substream); \ } while (0) void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, unsigned long flags); /** * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking * @substream: PCM substream * @flags: irq flags * * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with * the single-depth lockdep subclass. 
*/ #define snd_pcm_stream_lock_irqsave_nested(substream, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \ } while (0) /* definitions for guard(); use like guard(pcm_stream_lock) */ DEFINE_LOCK_GUARD_1(pcm_stream_lock, struct snd_pcm_substream, snd_pcm_stream_lock(_T->lock), snd_pcm_stream_unlock(_T->lock)) DEFINE_LOCK_GUARD_1(pcm_stream_lock_irq, struct snd_pcm_substream, snd_pcm_stream_lock_irq(_T->lock), snd_pcm_stream_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(pcm_stream_lock_irqsave, struct snd_pcm_substream, snd_pcm_stream_lock_irqsave(_T->lock, _T->flags), snd_pcm_stream_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) /** * snd_pcm_group_for_each_entry - iterate over the linked substreams * @s: the iterator * @substream: the substream * * Iterate over the all linked substreams to the given @substream. * When @substream isn't linked with any others, this gives returns @substream * itself once. */ #define snd_pcm_group_for_each_entry(s, substream) \ list_for_each_entry(s, &substream->group->substreams, link_list) #define for_each_pcm_streams(stream) \ for (stream = SNDRV_PCM_STREAM_PLAYBACK; \ stream <= SNDRV_PCM_STREAM_LAST; \ stream++) /** * snd_pcm_running - Check whether the substream is in a running state * @substream: substream to check * * Return: true if the given substream is in the state RUNNING, or in the * state DRAINING for playback. */ static inline int snd_pcm_running(struct snd_pcm_substream *substream) { return (substream->runtime->state == SNDRV_PCM_STATE_RUNNING || (substream->runtime->state == SNDRV_PCM_STATE_DRAINING && substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); } /** * __snd_pcm_set_state - Change the current PCM state * @runtime: PCM runtime to set * @state: the current state to set * * Call within the stream lock */ static inline void __snd_pcm_set_state(struct snd_pcm_runtime *runtime, snd_pcm_state_t state) { runtime->state = state; runtime->status->state = state; /* copy for mmap */ } /** * bytes_to_samples - Unit conversion of the size from bytes to samples * @runtime: PCM runtime instance * @size: size in bytes * * Return: the size in samples */ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->sample_bits; } /** * bytes_to_frames - Unit conversion of the size from bytes to frames * @runtime: PCM runtime instance * @size: size in bytes * * Return: the size in frames */ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->frame_bits; } /** * samples_to_bytes - Unit conversion of the size from samples to bytes * @runtime: PCM runtime instance * @size: size in samples * * Return: the byte size */ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size) { return size * runtime->sample_bits / 8; } /** * frames_to_bytes - Unit conversion of the size from frames to bytes * @runtime: PCM runtime instance * @size: size in frames * * Return: the byte size */ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size) { return size * runtime->frame_bits / 8; } /** * frame_aligned - Check whether the byte size is aligned to frames * @runtime: PCM runtime instance * @bytes: size in bytes * * Return: true if aligned, or false if not */ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes) { return bytes % runtime->byte_align == 0; } /** * snd_pcm_lib_buffer_bytes - Get 
the buffer size of the current PCM in bytes * @substream: PCM substream * * Return: buffer byte size */ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->buffer_size); } /** * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes * @substream: PCM substream * * Return: period byte size */ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->period_size); } /** * snd_pcm_playback_avail - Get the available (writable) space for playback * @runtime: PCM runtime instance * * Result is between 0 ... (boundary - 1) * * Return: available frame size */ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime) { snd_pcm_sframes_t avail = runtime->status->hw_ptr + runtime->buffer_size - runtime->control->appl_ptr; if (avail < 0) avail += runtime->boundary; else if ((snd_pcm_uframes_t) avail >= runtime->boundary) avail -= runtime->boundary; return avail; } /** * snd_pcm_capture_avail - Get the available (readable) space for capture * @runtime: PCM runtime instance * * Result is between 0 ... (boundary - 1) * * Return: available frame size */ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime) { snd_pcm_sframes_t avail = runtime->status->hw_ptr - runtime->control->appl_ptr; if (avail < 0) avail += runtime->boundary; return avail; } /** * snd_pcm_playback_hw_avail - Get the queued space for playback * @runtime: PCM runtime instance * * Return: available frame size */ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_playback_avail(runtime); } /** * snd_pcm_capture_hw_avail - Get the free space for capture * @runtime: PCM runtime instance * * Return: available frame size */ static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_capture_avail(runtime); } /** * snd_pcm_playback_ready - check whether the playback buffer is available * @substream: the pcm substream instance * * Checks whether enough free space is available on the playback buffer. * * Return: Non-zero if available, or zero if not. */ static inline int snd_pcm_playback_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_playback_avail(runtime) >= runtime->control->avail_min; } /** * snd_pcm_capture_ready - check whether the capture buffer is available * @substream: the pcm substream instance * * Checks whether enough capture data is available on the capture buffer. * * Return: Non-zero if available, or zero if not. */ static inline int snd_pcm_capture_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_capture_avail(runtime) >= runtime->control->avail_min; } /** * snd_pcm_playback_data - check whether any data exists on the playback buffer * @substream: the pcm substream instance * * Checks whether any data exists on the playback buffer. * * Return: Non-zero if any data exists, or zero if not. If stop_threshold * is bigger or equal to boundary, then this function returns always non-zero. 
*/ static inline int snd_pcm_playback_data(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->stop_threshold >= runtime->boundary) return 1; return snd_pcm_playback_avail(runtime) < runtime->buffer_size; } /** * snd_pcm_playback_empty - check whether the playback buffer is empty * @substream: the pcm substream instance * * Checks whether the playback buffer is empty. * * Return: Non-zero if empty, or zero if not. */ static inline int snd_pcm_playback_empty(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_playback_avail(runtime) >= runtime->buffer_size; } /** * snd_pcm_capture_empty - check whether the capture buffer is empty * @substream: the pcm substream instance * * Checks whether the capture buffer is empty. * * Return: Non-zero if empty, or zero if not. */ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_capture_avail(runtime) == 0; } /** * snd_pcm_trigger_done - Mark the master substream * @substream: the pcm substream instance * @master: the linked master substream * * When multiple substreams of the same card are linked and the hardware * supports the single-shot operation, the driver calls this in the loop * in snd_pcm_group_for_each_entry() for marking the substream as "done". * Then most of trigger operations are performed only to the given master * substream. * * The trigger_master mark is cleared at timestamp updates at the end * of trigger operations. */ static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream, struct snd_pcm_substream *master) { substream->runtime->trigger_master = master; } static inline int hw_is_mask(int var) { return var >= SNDRV_PCM_HW_PARAM_FIRST_MASK && var <= SNDRV_PCM_HW_PARAM_LAST_MASK; } static inline int hw_is_interval(int var) { return var >= SNDRV_PCM_HW_PARAM_FIRST_INTERVAL && var <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; } static inline struct snd_mask *hw_param_mask(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return &params->masks[var - SNDRV_PCM_HW_PARAM_FIRST_MASK]; } static inline struct snd_interval *hw_param_interval(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } static inline const struct snd_mask *hw_param_mask_c(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return &params->masks[var - SNDRV_PCM_HW_PARAM_FIRST_MASK]; } static inline const struct snd_interval *hw_param_interval_c(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } /** * params_channels - Get the number of channels from the hw params * @p: hw params * * Return: the number of channels */ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min; } /** * params_rate - Get the sample rate from the hw params * @p: hw params * * Return: the sample rate */ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min; } /** * params_period_size - Get the period size (in frames) from the hw params * @p: hw params * * Return: the period size in frames */ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, 
SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min; } /** * params_periods - Get the number of periods from the hw params * @p: hw params * * Return: the number of periods */ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min; } /** * params_buffer_size - Get the buffer size (in frames) from the hw params * @p: hw params * * Return: the buffer size in frames */ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min; } /** * params_buffer_bytes - Get the buffer size (in bytes) from the hw params * @p: hw params * * Return: the buffer size in bytes */ static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min; } int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v); int snd_interval_list(struct snd_interval *i, unsigned int count, const unsigned int *list, unsigned int mask); int snd_interval_ranges(struct snd_interval *i, unsigned int count, const struct snd_interval *list, unsigned int mask); int snd_interval_ratnum(struct snd_interval *i, unsigned int rats_count, const struct snd_ratnum *rats, unsigned int *nump, unsigned int *denp); void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params); void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var); int snd_pcm_hw_refine(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, u_int64_t mask); int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, unsigned int min, unsigned int max); int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var); int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_list *l); int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ranges *r); int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ratnums *r); int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ratdens *r); int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, unsigned int cond, unsigned int width, unsigned int msbits); int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, unsigned long step); int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var); int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime, unsigned int base_rate); int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond, int var, snd_pcm_hw_rule_func_t func, void *private, int dep, ...); /** * snd_pcm_hw_constraint_single() - Constrain parameter to a single value * @runtime: PCM runtime instance * @var: The hw_params variable to constrain * @val: The value to constrain to * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
*/ static inline int snd_pcm_hw_constraint_single( struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, unsigned int val) { return snd_pcm_hw_constraint_minmax(runtime, var, val, val); } int snd_pcm_format_signed(snd_pcm_format_t format); int snd_pcm_format_unsigned(snd_pcm_format_t format); int snd_pcm_format_linear(snd_pcm_format_t format); int snd_pcm_format_little_endian(snd_pcm_format_t format); int snd_pcm_format_big_endian(snd_pcm_format_t format); #if 0 /* just for kernel-doc */ /** * snd_pcm_format_cpu_endian - Check the PCM format is CPU-endian * @format: the format to check * * Return: 1 if the given PCM format is CPU-endian, 0 if * opposite, or a negative error code if endian not specified. */ int snd_pcm_format_cpu_endian(snd_pcm_format_t format); #endif /* DocBook */ #ifdef SNDRV_LITTLE_ENDIAN #define snd_pcm_format_cpu_endian(format) snd_pcm_format_little_endian(format) #else #define snd_pcm_format_cpu_endian(format) snd_pcm_format_big_endian(format) #endif int snd_pcm_format_width(snd_pcm_format_t format); /* in bits */ int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */ ssize_t snd_pcm_format_size(snd_pcm_format_t format, size_t samples); const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format); int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int frames); void snd_pcm_set_ops(struct snd_pcm * pcm, int direction, const struct snd_pcm_ops *ops); void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, const unsigned char *id, unsigned int len); /** * snd_pcm_set_sync - set the PCM sync id * @substream: the pcm substream * * Use the default PCM sync identifier for the specific card. */ static inline void snd_pcm_set_sync(struct snd_pcm_substream *substream) { substream->runtime->std_sync_id = true; } int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream); void snd_pcm_period_elapsed(struct snd_pcm_substream *substream); snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, void *buf, bool interleaved, snd_pcm_uframes_t frames, bool in_kernel); static inline snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream, const void __user *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false); } static inline snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream, void __user *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false); } static inline snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream, void __user **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false); } static inline snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream, void __user **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false); } static inline snd_pcm_sframes_t snd_pcm_kernel_write(struct snd_pcm_substream *substream, const void *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, true); } static inline snd_pcm_sframes_t snd_pcm_kernel_read(struct snd_pcm_substream *substream, void *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, buf, true, frames, true); } static inline snd_pcm_sframes_t 
snd_pcm_kernel_writev(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, bufs, false, frames, true); } static inline snd_pcm_sframes_t snd_pcm_kernel_readv(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, bufs, false, frames, true); } int snd_pcm_hw_limit_rates(struct snd_pcm_hardware *hw); static inline int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime) { return snd_pcm_hw_limit_rates(&runtime->hw); } unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate); unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit); unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a, unsigned int rates_b); unsigned int snd_pcm_rate_range_to_bits(unsigned int rate_min, unsigned int rate_max); /** * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer * @substream: PCM substream to set * @bufp: the buffer information, NULL to clear * * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL. * Otherwise it clears the current buffer information. */ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream, struct snd_dma_buffer *bufp) { struct snd_pcm_runtime *runtime = substream->runtime; if (bufp) { runtime->dma_buffer_p = bufp; runtime->dma_area = bufp->area; runtime->dma_addr = bufp->addr; runtime->dma_bytes = bufp->bytes; } else { runtime->dma_buffer_p = NULL; runtime->dma_area = NULL; runtime->dma_addr = 0; runtime->dma_bytes = 0; } } /** * snd_pcm_gettime - Fill the timespec64 depending on the timestamp mode * @runtime: PCM runtime instance * @tv: timespec64 to fill */ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime, struct timespec64 *tv) { switch (runtime->tstamp_type) { case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC: ktime_get_ts64(tv); break; case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW: ktime_get_raw_ts64(tv); break; default: ktime_get_real_ts64(tv); break; } } /* * Memory */ void snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream); void snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm); void snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream, int type, struct device *data, size_t size, size_t max); void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm, int type, void *data, size_t size, size_t max); int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size); int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream); int snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type, struct device *data, size_t size, size_t max); int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type, struct device *data, size_t size, size_t max); /** * snd_pcm_set_fixed_buffer - Preallocate and set up the fixed size PCM buffer * @substream: the pcm substream instance * @type: DMA type (SNDRV_DMA_TYPE_*) * @data: DMA type dependent data * @size: the requested pre-allocation size in bytes * * This is a variant of snd_pcm_set_managed_buffer(), but this pre-allocates * only the given sized buffer and doesn't allow re-allocation nor dynamic * allocation of a larger buffer unlike the standard one. * The function may return -ENOMEM error, hence the caller must check it. 
* * Return: zero if successful, or a negative error code */ static inline int __must_check snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type, struct device *data, size_t size) { return snd_pcm_set_managed_buffer(substream, type, data, size, 0); } /** * snd_pcm_set_fixed_buffer_all - Preallocate and set up the fixed size PCM buffer * @pcm: the pcm instance * @type: DMA type (SNDRV_DMA_TYPE_*) * @data: DMA type dependent data * @size: the requested pre-allocation size in bytes * * Apply the set up of the fixed buffer via snd_pcm_set_fixed_buffer() for * all substream. If any of allocation fails, it returns -ENOMEM, hence the * caller must check the return value. * * Return: zero if successful, or a negative error code */ static inline int __must_check snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type, struct device *data, size_t size) { return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0); } #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p) /** * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset * @substream: PCM substream * @ofs: byte offset * * Return: DMA address */ static inline dma_addr_t snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs); } /** * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the * contig. page from the given size * @substream: PCM substream * @ofs: byte offset * @size: byte size to examine * * Return: chunk size */ static inline unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, unsigned int ofs, unsigned int size) { return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size); } int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area); /* mmap for io-memory area */ #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) #define SNDRV_PCM_INFO_MMAP_IOMEM SNDRV_PCM_INFO_MMAP int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area); #else #define SNDRV_PCM_INFO_MMAP_IOMEM 0 #define snd_pcm_lib_mmap_iomem NULL #endif /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number * @max: pointer to store the max size */ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) { *max = dma < 4 ? 64 * 1024 : 128 * 1024; } /* * Misc */ #define SNDRV_PCM_DEFAULT_CON_SPDIF (IEC958_AES0_CON_EMPHASIS_NONE|\ (IEC958_AES1_CON_ORIGINAL<<8)|\ (IEC958_AES1_CON_PCM_CODER<<8)|\ (IEC958_AES3_CON_FS_48000<<24)) const char *snd_pcm_format_name(snd_pcm_format_t format); /** * snd_pcm_direction_name - Get a string naming the direction of a stream * @direction: Stream's direction, one of SNDRV_PCM_STREAM_XXX * * Returns a string naming the direction of the stream. */ static inline const char *snd_pcm_direction_name(int direction) { if (direction == SNDRV_PCM_STREAM_PLAYBACK) return "Playback"; else return "Capture"; } /** * snd_pcm_stream_str - Get a string naming the direction of a stream * @substream: the pcm substream instance * * Return: A string naming the direction of the stream. 
*/ static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream) { return snd_pcm_direction_name(substream->stream); } /* * PCM channel-mapping control API */ /* array element of channel maps */ struct snd_pcm_chmap_elem { unsigned char channels; unsigned char map[15]; }; /* channel map information; retrieved via snd_kcontrol_chip() */ struct snd_pcm_chmap { struct snd_pcm *pcm; /* assigned PCM instance */ int stream; /* PLAYBACK or CAPTURE */ struct snd_kcontrol *kctl; const struct snd_pcm_chmap_elem *chmap; unsigned int max_channels; unsigned int channel_mask; /* optional: active channels bitmask */ void *private_data; /* optional: private data pointer */ }; /** * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info * @info: chmap information * @idx: the substream number index * * Return: the matched PCM substream, or NULL if not found */ static inline struct snd_pcm_substream * snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx) { struct snd_pcm_substream *s; for (s = info->pcm->streams[info->stream].substream; s; s = s->next) if (s->number == idx) return s; return NULL; } /* ALSA-standard channel maps (RL/RR prior to C/LFE) */ extern const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[]; /* Other world's standard channel maps (C/LFE prior to RL/RR) */ extern const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[]; /* bit masks to be passed to snd_pcm_chmap.channel_mask field */ #define SND_PCM_CHMAP_MASK_24 ((1U << 2) | (1U << 4)) #define SND_PCM_CHMAP_MASK_246 (SND_PCM_CHMAP_MASK_24 | (1U << 6)) #define SND_PCM_CHMAP_MASK_2468 (SND_PCM_CHMAP_MASK_246 | (1U << 8)) int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, const struct snd_pcm_chmap_elem *chmap, int max_channels, unsigned long private_value, struct snd_pcm_chmap **info_ret); /** * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise * @pcm_format: PCM format * * Return: 64bit mask corresponding to the given PCM format */ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) { return 1ULL << (__force int) pcm_format; } /** * pcm_for_each_format - helper to iterate for each format type * @f: the iterator variable in snd_pcm_format_t type */ #define pcm_for_each_format(f) \ for ((f) = SNDRV_PCM_FORMAT_FIRST; \ (__force int)(f) <= (__force int)SNDRV_PCM_FORMAT_LAST; \ (f) = (__force snd_pcm_format_t)((__force int)(f) + 1)) /* printk helpers */ #define pcm_err(pcm, fmt, args...) \ dev_err((pcm)->card->dev, fmt, ##args) #define pcm_warn(pcm, fmt, args...) \ dev_warn((pcm)->card->dev, fmt, ##args) #define pcm_dbg(pcm, fmt, args...) 
\ dev_dbg((pcm)->card->dev, fmt, ##args) /* helpers for copying between iov_iter and iomem */ size_t copy_to_iter_fromio(const void __iomem *src, size_t bytes, struct iov_iter *iter) __must_check; size_t copy_from_iter_toio(void __iomem *dst, size_t bytes, struct iov_iter *iter) __must_check; struct snd_pcm_status64 { snd_pcm_state_t state; /* stream state */ u8 rsvd[4]; s64 trigger_tstamp_sec; /* time when stream was started/stopped/paused */ s64 trigger_tstamp_nsec; s64 tstamp_sec; /* reference timestamp */ s64 tstamp_nsec; snd_pcm_uframes_t appl_ptr; /* appl ptr */ snd_pcm_uframes_t hw_ptr; /* hw ptr */ snd_pcm_sframes_t delay; /* current delay in frames */ snd_pcm_uframes_t avail; /* number of frames available */ snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */ snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */ snd_pcm_state_t suspended_state; /* suspended stream state */ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */ s64 audio_tstamp_sec; /* sample counter, wall clock, PHC or on-demand sync'ed */ s64 audio_tstamp_nsec; s64 driver_tstamp_sec; /* useful in case reference system tstamp is reported with delay */ s64 driver_tstamp_nsec; __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */ unsigned char reserved[52-4*sizeof(s64)]; /* must be filled with zero */ }; #define SNDRV_PCM_IOCTL_STATUS64 _IOR('A', 0x20, struct snd_pcm_status64) #define SNDRV_PCM_IOCTL_STATUS_EXT64 _IOWR('A', 0x24, struct snd_pcm_status64) struct snd_pcm_status32 { snd_pcm_state_t state; /* stream state */ s32 trigger_tstamp_sec; /* time when stream was started/stopped/paused */ s32 trigger_tstamp_nsec; s32 tstamp_sec; /* reference timestamp */ s32 tstamp_nsec; u32 appl_ptr; /* appl ptr */ u32 hw_ptr; /* hw ptr */ s32 delay; /* current delay in frames */ u32 avail; /* number of frames available */ u32 avail_max; /* max frames available on hw since last status */ u32 overrange; /* count of ADC (capture) overrange detections from last status */ snd_pcm_state_t suspended_state; /* suspended stream state */ u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */ s32 audio_tstamp_sec; /* sample counter, wall clock, PHC or on-demand sync'ed */ s32 audio_tstamp_nsec; s32 driver_tstamp_sec; /* useful in case reference system tstamp is reported with delay */ s32 driver_tstamp_nsec; u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */ unsigned char reserved[52-4*sizeof(s32)]; /* must be filled with zero */ }; #define SNDRV_PCM_IOCTL_STATUS32 _IOR('A', 0x20, struct snd_pcm_status32) #define SNDRV_PCM_IOCTL_STATUS_EXT32 _IOWR('A', 0x24, struct snd_pcm_status32) #endif /* __SOUND_PCM_H */
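Illustrative sketch, not taken from the header above: how a typical PCM driver might use the params_*() accessors, the frame/byte conversion helpers and the managed-buffer API declared in sound/pcm.h. The foo_* names, the 64 KiB buffer size and the assumption that pcm->private_data points at the chip structure are all hypothetical.

#include <sound/core.h>
#include <sound/pcm.h>

struct foo_chip {
	struct snd_pcm *pcm;
	size_t hw_pos_bytes;	/* byte position last reported by the hardware */
};

/* hw_params callback: read the negotiated stream parameters */
static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	unsigned int rate = params_rate(params);
	unsigned int channels = params_channels(params);
	size_t bytes = params_buffer_bytes(params);

	/* program the (hypothetical) hardware with rate, channels, bytes */
	pcm_dbg(substream->pcm, "hw_params: %u Hz, %u ch, %zu byte buffer\n",
		rate, channels, bytes);
	return 0;
}

/* pointer callback: convert the hardware byte counter into frames */
static snd_pcm_uframes_t foo_pcm_pointer(struct snd_pcm_substream *substream)
{
	/* assumes pcm->private_data (copied into the substream) is the chip */
	struct foo_chip *chip = substream->private_data;

	return bytes_to_frames(substream->runtime, chip->hw_pos_bytes);
}

/* card probe: let the PCM core allocate/resize the DMA buffer in hw_params */
static int foo_setup_buffers(struct foo_chip *chip, struct device *dev)
{
	return snd_pcm_set_managed_buffer_all(chip->pcm, SNDRV_DMA_TYPE_DEV,
					      dev, 64 * 1024, 64 * 1024);
}

A real driver would additionally collect such callbacks in a struct snd_pcm_ops and register them with snd_pcm_set_ops(), and call snd_pcm_period_elapsed() from its interrupt handler.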
// SPDX-License-Identifier: GPL-2.0 /* USB Driver layer for GSM modems Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de> Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> History: see the git log. Work sponsored by: Sigos GmbH, Germany <info@sigos.de> This driver exists because the "normal" serial driver doesn't work too well with GSM modems. 
Issues: - data loss -- one single Receive URB is not nearly enough - controlling the baud rate doesn't make sense */ #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include "usb-wwan.h" /* * Generate DTR/RTS signals on the port using the SET_CONTROL_LINE_STATE request * in CDC ACM. */ static int usb_wwan_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; int val = 0; int ifnum; int res; portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= USB_CDC_CTRL_DTR; if (portdata->rts_state) val |= USB_CDC_CTRL_RTS; ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; res = usb_autopm_get_interface(serial->interface); if (res) return res; res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), USB_CDC_REQ_SET_CONTROL_LINE_STATE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, val, ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT); usb_autopm_put_interface(port->serial->interface); return res; } void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; intfdata = usb_get_serial_data(port->serial); if (!intfdata->use_send_setup) return; portdata = usb_get_serial_port_data(port); /* FIXME: locking */ portdata->rts_state = on; portdata->dtr_state = on; usb_wwan_send_setup(port); } EXPORT_SYMBOL(usb_wwan_dtr_rts); int usb_wwan_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned int value; struct usb_wwan_port_private *portdata; portdata = usb_get_serial_port_data(port); value = ((portdata->rts_state) ? TIOCM_RTS : 0) | ((portdata->dtr_state) ? TIOCM_DTR : 0) | ((portdata->cts_state) ? TIOCM_CTS : 0) | ((portdata->dsr_state) ? TIOCM_DSR : 0) | ((portdata->dcd_state) ? TIOCM_CAR : 0) | ((portdata->ri_state) ? TIOCM_RNG : 0); return value; } EXPORT_SYMBOL(usb_wwan_tiocmget); int usb_wwan_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(port->serial); if (!intfdata->use_send_setup) return -EINVAL; /* FIXME: what locks portdata fields ? 
*/ if (set & TIOCM_RTS) portdata->rts_state = 1; if (set & TIOCM_DTR) portdata->dtr_state = 1; if (clear & TIOCM_RTS) portdata->rts_state = 0; if (clear & TIOCM_DTR) portdata->dtr_state = 0; return usb_wwan_send_setup(port); } EXPORT_SYMBOL(usb_wwan_tiocmset); int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; int i; int left, todo; struct urb *this_urb = NULL; /* spurious */ int err; unsigned long flags; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(port->serial); dev_dbg(&port->dev, "%s: write (%d chars)\n", __func__, count); left = count; for (i = 0; left > 0 && i < N_OUT_URB; i++) { todo = left; if (todo > OUT_BUFLEN) todo = OUT_BUFLEN; this_urb = portdata->out_urbs[i]; if (test_and_set_bit(i, &portdata->out_busy)) { if (time_before(jiffies, portdata->tx_start_time[i] + 10 * HZ)) continue; usb_unlink_urb(this_urb); continue; } dev_dbg(&port->dev, "%s: endpoint %d buf %d\n", __func__, usb_pipeendpoint(this_urb->pipe), i); err = usb_autopm_get_interface_async(port->serial->interface); if (err < 0) { clear_bit(i, &portdata->out_busy); break; } /* send the data */ memcpy(this_urb->transfer_buffer, buf, todo); this_urb->transfer_buffer_length = todo; spin_lock_irqsave(&intfdata->susp_lock, flags); if (intfdata->suspended) { usb_anchor_urb(this_urb, &portdata->delayed); spin_unlock_irqrestore(&intfdata->susp_lock, flags); } else { intfdata->in_flight++; spin_unlock_irqrestore(&intfdata->susp_lock, flags); err = usb_submit_urb(this_urb, GFP_ATOMIC); if (err) { dev_err(&port->dev, "%s: submit urb %d failed: %d\n", __func__, i, err); clear_bit(i, &portdata->out_busy); spin_lock_irqsave(&intfdata->susp_lock, flags); intfdata->in_flight--; spin_unlock_irqrestore(&intfdata->susp_lock, flags); usb_autopm_put_interface_async(port->serial->interface); break; } } portdata->tx_start_time[i] = jiffies; buf += todo; left -= todo; } count -= left; dev_dbg(&port->dev, "%s: wrote (did %d)\n", __func__, count); return count; } EXPORT_SYMBOL(usb_wwan_write); static void usb_wwan_indat_callback(struct urb *urb) { int err; int endpoint; struct usb_serial_port *port; struct device *dev; unsigned char *data = urb->transfer_buffer; int status = urb->status; endpoint = usb_pipeendpoint(urb->pipe); port = urb->context; dev = &port->dev; if (status) { dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n", __func__, status, endpoint); /* don't resubmit on fatal errors */ if (status == -ESHUTDOWN || status == -ENOENT) return; } else { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, urb->actual_length); tty_flip_buffer_push(&port->port); } else dev_dbg(dev, "%s: empty read urb received\n", __func__); } /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { if (err != -EPERM && err != -ENODEV) { dev_err(dev, "%s: resubmit read urb failed. 
(%d)\n", __func__, err); /* busy also in error unless we are killed */ usb_mark_last_busy(port->serial->dev); } } else { usb_mark_last_busy(port->serial->dev); } } static void usb_wwan_outdat_callback(struct urb *urb) { struct usb_serial_port *port; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; unsigned long flags; int i; port = urb->context; intfdata = usb_get_serial_data(port->serial); usb_serial_port_softint(port); usb_autopm_put_interface_async(port->serial->interface); portdata = usb_get_serial_port_data(port); spin_lock_irqsave(&intfdata->susp_lock, flags); intfdata->in_flight--; spin_unlock_irqrestore(&intfdata->susp_lock, flags); for (i = 0; i < N_OUT_URB; ++i) { if (portdata->out_urbs[i] == urb) { smp_mb__before_atomic(); clear_bit(i, &portdata->out_busy); break; } } } unsigned int usb_wwan_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; int i; unsigned int data_len = 0; struct urb *this_urb; portdata = usb_get_serial_port_data(port); for (i = 0; i < N_OUT_URB; i++) { this_urb = portdata->out_urbs[i]; if (this_urb && !test_bit(i, &portdata->out_busy)) data_len += OUT_BUFLEN; } dev_dbg(&port->dev, "%s: %u\n", __func__, data_len); return data_len; } EXPORT_SYMBOL(usb_wwan_write_room); unsigned int usb_wwan_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; int i; unsigned int data_len = 0; struct urb *this_urb; portdata = usb_get_serial_port_data(port); for (i = 0; i < N_OUT_URB; i++) { this_urb = portdata->out_urbs[i]; /* FIXME: This locking is insufficient as this_urb may go unused during the test */ if (this_urb && test_bit(i, &portdata->out_busy)) data_len += this_urb->transfer_buffer_length; } dev_dbg(&port->dev, "%s: %u\n", __func__, data_len); return data_len; } EXPORT_SYMBOL(usb_wwan_chars_in_buffer); int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; struct usb_serial *serial = port->serial; int i, err; struct urb *urb; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(serial); if (port->interrupt_in_urb) { err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (err) { dev_err(&port->dev, "%s: submit int urb failed: %d\n", __func__, err); } } /* Start reading from the IN endpoint */ for (i = 0; i < N_IN_URB; i++) { urb = portdata->in_urbs[i]; if (!urb) continue; err = usb_submit_urb(urb, GFP_KERNEL); if (err) { dev_err(&port->dev, "%s: submit read urb %d failed: %d\n", __func__, i, err); } } spin_lock_irq(&intfdata->susp_lock); if (++intfdata->open_ports == 1) serial->interface->needs_remote_wakeup = 1; spin_unlock_irq(&intfdata->susp_lock); /* this balances a get in the generic USB serial code */ usb_autopm_put_interface(serial->interface); return 0; } EXPORT_SYMBOL(usb_wwan_open); static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata) { int i; for (i = 0; i < N_OUT_URB; i++) { if (urb == portdata->out_urbs[i]) { clear_bit(i, &portdata->out_busy); break; } } } void usb_wwan_close(struct usb_serial_port *port) { int i; struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; portdata = usb_get_serial_port_data(port); /* * Need to take susp_lock to make sure port is not already being * resumed, but no need to hold 
it due to the tty-port initialized * flag. */ spin_lock_irq(&intfdata->susp_lock); if (--intfdata->open_ports == 0) serial->interface->needs_remote_wakeup = 0; spin_unlock_irq(&intfdata->susp_lock); for (;;) { urb = usb_get_from_anchor(&portdata->delayed); if (!urb) break; unbusy_queued_urb(urb, portdata); usb_autopm_put_interface_async(serial->interface); } for (i = 0; i < N_IN_URB; i++) usb_kill_urb(portdata->in_urbs[i]); for (i = 0; i < N_OUT_URB; i++) usb_kill_urb(portdata->out_urbs[i]); usb_kill_urb(port->interrupt_in_urb); usb_autopm_get_interface_no_resume(serial->interface); } EXPORT_SYMBOL(usb_wwan_close); static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, int endpoint, int dir, void *ctx, char *buf, int len, void (*callback) (struct urb *)) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ if (!urb) return NULL; usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); if (intfdata->use_zlp && dir == USB_DIR_OUT) urb->transfer_flags |= URB_ZERO_PACKET; return urb; } int usb_wwan_port_probe(struct usb_serial_port *port) { struct usb_wwan_port_private *portdata; struct urb *urb; u8 *buffer; int i; if (!port->bulk_in_size || !port->bulk_out_size) return -ENODEV; portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); if (!portdata) return -ENOMEM; init_usb_anchor(&portdata->delayed); for (i = 0; i < N_IN_URB; i++) { buffer = (u8 *)__get_free_page(GFP_KERNEL); if (!buffer) goto bail_out_error; portdata->in_buffer[i] = buffer; urb = usb_wwan_setup_urb(port, port->bulk_in_endpointAddress, USB_DIR_IN, port, buffer, IN_BUFLEN, usb_wwan_indat_callback); portdata->in_urbs[i] = urb; } for (i = 0; i < N_OUT_URB; i++) { buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); if (!buffer) goto bail_out_error2; portdata->out_buffer[i] = buffer; urb = usb_wwan_setup_urb(port, port->bulk_out_endpointAddress, USB_DIR_OUT, port, buffer, OUT_BUFLEN, usb_wwan_outdat_callback); portdata->out_urbs[i] = urb; } usb_set_serial_port_data(port, portdata); return 0; bail_out_error2: for (i = 0; i < N_OUT_URB; i++) { usb_free_urb(portdata->out_urbs[i]); kfree(portdata->out_buffer[i]); } bail_out_error: for (i = 0; i < N_IN_URB; i++) { usb_free_urb(portdata->in_urbs[i]); free_page((unsigned long)portdata->in_buffer[i]); } kfree(portdata); return -ENOMEM; } EXPORT_SYMBOL_GPL(usb_wwan_port_probe); void usb_wwan_port_remove(struct usb_serial_port *port) { int i; struct usb_wwan_port_private *portdata; portdata = usb_get_serial_port_data(port); usb_set_serial_port_data(port, NULL); for (i = 0; i < N_IN_URB; i++) { usb_free_urb(portdata->in_urbs[i]); free_page((unsigned long)portdata->in_buffer[i]); } for (i = 0; i < N_OUT_URB; i++) { usb_free_urb(portdata->out_urbs[i]); kfree(portdata->out_buffer[i]); } kfree(portdata); } EXPORT_SYMBOL(usb_wwan_port_remove); #ifdef CONFIG_PM static void stop_urbs(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); if (!portdata) continue; for (j = 0; j < N_IN_URB; j++) usb_kill_urb(portdata->in_urbs[j]); for (j = 0; j < N_OUT_URB; j++) usb_kill_urb(portdata->out_urbs[j]); usb_kill_urb(port->interrupt_in_urb); } } int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_wwan_intf_private *intfdata = 
usb_get_serial_data(serial); spin_lock_irq(&intfdata->susp_lock); if (PMSG_IS_AUTO(message)) { if (intfdata->in_flight) { spin_unlock_irq(&intfdata->susp_lock); return -EBUSY; } } intfdata->suspended = 1; spin_unlock_irq(&intfdata->susp_lock); stop_urbs(serial); return 0; } EXPORT_SYMBOL(usb_wwan_suspend); /* Caller must hold susp_lock. */ static int usb_wwan_submit_delayed_urbs(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *data = usb_get_serial_data(serial); struct usb_wwan_port_private *portdata; struct urb *urb; int err_count = 0; int err; portdata = usb_get_serial_port_data(port); for (;;) { urb = usb_get_from_anchor(&portdata->delayed); if (!urb) break; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { dev_err(&port->dev, "%s: submit urb failed: %d\n", __func__, err); err_count++; unbusy_queued_urb(urb, portdata); usb_autopm_put_interface_async(serial->interface); continue; } data->in_flight++; } if (err_count) return -EIO; return 0; } int usb_wwan_resume(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct usb_wwan_port_private *portdata; struct urb *urb; int err; int err_count = 0; spin_lock_irq(&intfdata->susp_lock); for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if (!tty_port_initialized(&port->port)) continue; portdata = usb_get_serial_port_data(port); if (port->interrupt_in_urb) { err = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (err) { dev_err(&port->dev, "%s: submit int urb failed: %d\n", __func__, err); err_count++; } } err = usb_wwan_submit_delayed_urbs(port); if (err) err_count++; for (j = 0; j < N_IN_URB; j++) { urb = portdata->in_urbs[j]; err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { dev_err(&port->dev, "%s: submit read urb %d failed: %d\n", __func__, i, err); err_count++; } } } intfdata->suspended = 0; spin_unlock_irq(&intfdata->susp_lock); if (err_count) return -EIO; return 0; } EXPORT_SYMBOL(usb_wwan_resume); #endif MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2");
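Illustrative sketch, not taken from the file above: how an option-style modem driver wires the usb_wwan_*() helpers exported by this layer into its struct usb_serial_driver, and allocates the shared struct usb_wwan_intf_private in its attach callback. The "my_modem" name, the vendor/product IDs and all my_modem_* identifiers are hypothetical.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "usb-wwan.h"

static const struct usb_device_id my_modem_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678) },		/* hypothetical VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, my_modem_ids);

static int my_modem_attach(struct usb_serial *serial)
{
	struct usb_wwan_intf_private *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->susp_lock);
	data->use_send_setup = 1;	/* enable DTR/RTS via usb_wwan_send_setup() */
	usb_set_serial_data(serial, data);

	return 0;
}

static void my_modem_release(struct usb_serial *serial)
{
	kfree(usb_get_serial_data(serial));
}

static struct usb_serial_driver my_modem_device = {
	.driver = {
		.name		= "my_modem",
	},
	.description		= "Hypothetical GSM modem",
	.id_table		= my_modem_ids,
	.num_ports		= 1,
	.attach			= my_modem_attach,
	.release		= my_modem_release,
	.open			= usb_wwan_open,
	.close			= usb_wwan_close,
	.dtr_rts		= usb_wwan_dtr_rts,
	.write			= usb_wwan_write,
	.write_room		= usb_wwan_write_room,
	.chars_in_buffer	= usb_wwan_chars_in_buffer,
	.tiocmget		= usb_wwan_tiocmget,
	.tiocmset		= usb_wwan_tiocmset,
	.port_probe		= usb_wwan_port_probe,
	.port_remove		= usb_wwan_port_remove,
#ifdef CONFIG_PM
	.suspend		= usb_wwan_suspend,
	.resume			= usb_wwan_resume,
#endif
};

static struct usb_serial_driver * const my_modem_drivers[] = {
	&my_modem_device, NULL
};

module_usb_serial_driver(my_modem_drivers, my_modem_ids);
MODULE_LICENSE("GPL v2");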
4 4 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 // SPDX-License-Identifier: GPL-2.0-or-later /* * Nano River Technologies viperboard IIO ADC driver * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <poeschel@lemonage.de> * All rights reserved. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/iio/iio.h> #include <linux/mfd/viperboard.h> #define VPRBRD_ADC_CMD_GET 0x00 struct vprbrd_adc_msg { u8 cmd; u8 chan; u8 val; } __packed; struct vprbrd_adc { struct vprbrd *vb; }; #define VPRBRD_ADC_CHANNEL(_index) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _index, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ } static struct iio_chan_spec const vprbrd_adc_iio_channels[] = { VPRBRD_ADC_CHANNEL(0), VPRBRD_ADC_CHANNEL(1), VPRBRD_ADC_CHANNEL(2), VPRBRD_ADC_CHANNEL(3), }; static int vprbrd_iio_read_raw(struct iio_dev *iio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { int ret, error = 0; struct vprbrd_adc *adc = iio_priv(iio_dev); struct vprbrd *vb = adc->vb; struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf; switch (info) { case IIO_CHAN_INFO_RAW: mutex_lock(&vb->lock); admsg->cmd = VPRBRD_ADC_CMD_GET; admsg->chan = chan->channel; admsg->val = 0x00; ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg, sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS); if (ret != sizeof(struct vprbrd_adc_msg)) { dev_err(&iio_dev->dev, "usb send error on adc read\n"); error = -EREMOTEIO; } ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg, sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS); *val = admsg->val; mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_adc_msg)) { dev_err(&iio_dev->dev, "usb recv error on adc read\n"); error = -EREMOTEIO; } if (error) goto error; return IIO_VAL_INT; default: error = -EINVAL; break; } error: return error; } static const struct iio_info vprbrd_adc_iio_info = { .read_raw = &vprbrd_iio_read_raw, }; static int vprbrd_adc_probe(struct platform_device *pdev) { struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent); struct vprbrd_adc *adc; struct iio_dev *indio_dev; int ret; /* registering iio */ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc)); if (!indio_dev) { dev_err(&pdev->dev, "failed allocating iio device\n"); return -ENOMEM; } adc = iio_priv(indio_dev); adc->vb = vb; indio_dev->name = "viperboard adc"; indio_dev->info = &vprbrd_adc_iio_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = vprbrd_adc_iio_channels; indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels); ret = devm_iio_device_register(&pdev->dev, indio_dev); if (ret) { dev_err(&pdev->dev, "could not register iio (adc)"); return ret; } return 0; } static struct platform_driver vprbrd_adc_driver = { .driver = { .name = "viperboard-adc", }, 
.probe = vprbrd_adc_probe, }; module_platform_driver(vprbrd_adc_driver); MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>"); MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:viperboard-adc");
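Illustrative note, not taken from the driver above: the four indexed IIO_VOLTAGE channels registered here are exposed to userspace as in_voltage0_raw through in_voltage3_raw in the IIO device's sysfs directory. A minimal userspace read might look like the sketch below; the iio:device0 index is hypothetical and should in practice be resolved by matching the device's "name" attribute ("viperboard adc").

#include <stdio.h>

int main(void)
{
	/* device index is hypothetical; match the "name" attribute in practice */
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage0_raw", "r");
	int raw;

	if (!f) {
		perror("in_voltage0_raw");
		return 1;
	}
	if (fscanf(f, "%d", &raw) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("ADC channel 0 raw value: %d\n", raw);
	return 0;
}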
90 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> */ #ifndef _LINUX_BITFIELD_H #define _LINUX_BITFIELD_H #include <linux/build_bug.h> #include <asm/byteorder.h> /* * Bitfield access macros * * FIELD_{GET,PREP} macros take as first parameter shifted mask * from which they extract the base mask and shift amount. * Mask must be a compilation time constant. * * Example: * * #include <linux/bitfield.h> * #include <linux/bits.h> * * #define REG_FIELD_A GENMASK(6, 0) * #define REG_FIELD_B BIT(7) * #define REG_FIELD_C GENMASK(15, 8) * #define REG_FIELD_D GENMASK(31, 16) * * Get: * a = FIELD_GET(REG_FIELD_A, reg); * b = FIELD_GET(REG_FIELD_B, reg); * * Set: * reg = FIELD_PREP(REG_FIELD_A, 1) | * FIELD_PREP(REG_FIELD_B, 0) | * FIELD_PREP(REG_FIELD_C, c) | * FIELD_PREP(REG_FIELD_D, 0x40); * * Modify: * reg &= ~REG_FIELD_C; * reg |= FIELD_PREP(REG_FIELD_C, c); */ #define __bf_shf(x) (__builtin_ffsll(x) - 1) #define __scalar_type_to_unsigned_cases(type) \ unsigned type: (unsigned type)0, \ signed type: (unsigned type)0 #define __unsigned_scalar_typeof(x) typeof( \ _Generic((x), \ char: (unsigned char)0, \ __scalar_type_to_unsigned_cases(char), \ __scalar_type_to_unsigned_cases(short), \ __scalar_type_to_unsigned_cases(int), \ __scalar_type_to_unsigned_cases(long), \ __scalar_type_to_unsigned_cases(long long), \ default: (x))) #define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x)) #define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ ({ \ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ _pfx "mask is not constant"); \ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ ~((_mask) >> __bf_shf(_mask)) & \ (0 + (_val)) : 0, \ _pfx "value too large for the field"); \ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \ __bf_cast_unsigned(_reg, ~0ull), \ _pfx "type of reg too small for mask"); \ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ (1ULL << __bf_shf(_mask))); \ }) /** * FIELD_MAX() - produce the maximum value representable by a field * @_mask: shifted mask defining the field's length and position * * FIELD_MAX() returns the maximum value that can be held in the field * specified by @_mask. */ #define FIELD_MAX(_mask) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \ }) /** * FIELD_FIT() - check if value fits in the field * @_mask: shifted mask defining the field's length and position * @_val: value to test against the field * * Return: true if @_val can fit inside @_mask, false if @_val is too big. 
*/ #define FIELD_FIT(_mask, _val) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) /** * FIELD_PREP() - prepare a bitfield element * @_mask: shifted mask defining the field's length and position * @_val: value to put in the field * * FIELD_PREP() masks and shifts up the value. The result should * be combined with other fields of the bitfield using logical OR. */ #define FIELD_PREP(_mask, _val) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ }) #define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0) /** * FIELD_PREP_CONST() - prepare a constant bitfield element * @_mask: shifted mask defining the field's length and position * @_val: value to put in the field * * FIELD_PREP_CONST() masks and shifts up the value. The result should * be combined with other fields of the bitfield using logical OR. * * Unlike FIELD_PREP() this is a constant expression and can therefore * be used in initializers. Error checking is less comfortable for this * version, and non-constant masks cannot be used. */ #define FIELD_PREP_CONST(_mask, _val) \ ( \ /* mask must be non-zero */ \ BUILD_BUG_ON_ZERO((_mask) == 0) + \ /* check if value fits */ \ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \ /* check if mask is contiguous */ \ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \ /* and create the value */ \ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \ ) /** * FIELD_GET() - extract a bitfield element * @_mask: shifted mask defining the field's length and position * @_reg: value of entire bitfield * * FIELD_GET() extracts the field specified by @_mask from the * bitfield passed in as @_reg by masking and shifting it down. */ #define FIELD_GET(_mask, _reg) \ ({ \ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) extern void __compiletime_error("value doesn't fit into mask") __field_overflow(void); extern void __compiletime_error("bad bitfield mask") __bad_mask(void); static __always_inline u64 field_multiplier(u64 field) { if ((field | (field - 1)) & ((field | (field - 1)) + 1)) __bad_mask(); return field & -field; } static __always_inline u64 field_mask(u64 field) { return field / field_multiplier(field); } #define field_max(field) ((typeof(field))field_mask(field)) #define ____MAKE_OP(type,base,to,from) \ static __always_inline __##type type##_encode_bits(base v, base field) \ { \ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ __field_overflow(); \ return to((v & field_mask(field)) * field_multiplier(field)); \ } \ static __always_inline __##type type##_replace_bits(__##type old, \ base val, base field) \ { \ return (old & ~to(field)) | type##_encode_bits(val, field); \ } \ static __always_inline void type##p_replace_bits(__##type *p, \ base val, base field) \ { \ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ } \ static __always_inline base type##_get_bits(__##type v, base field) \ { \ return (from(v) & field)/field_multiplier(field); \ } #define __MAKE_OP(size) \ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ ____MAKE_OP(u##size,u##size,,) ____MAKE_OP(u8,u8,,) __MAKE_OP(16) __MAKE_OP(32) __MAKE_OP(64) #undef __MAKE_OP #undef ____MAKE_OP #endif
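Illustrative sketch, not taken from the header above: using FIELD_PREP()/FIELD_GET()/FIELD_FIT() and one of the ____MAKE_OP()-generated helpers on a made-up register layout. The FOO_CTRL_* register name and field positions are hypothetical.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FOO_CTRL_ENABLE		BIT(0)
#define FOO_CTRL_MODE		GENMASK(3, 1)
#define FOO_CTRL_DIVIDER	GENMASK(15, 8)

static u32 foo_pack_ctrl(unsigned int mode, unsigned int div)
{
	/* FIELD_FIT() validates a run-time value against the field width */
	if (!FIELD_FIT(FOO_CTRL_DIVIDER, div))
		return 0;

	return FIELD_PREP(FOO_CTRL_ENABLE, 1) |
	       FIELD_PREP(FOO_CTRL_MODE, mode) |
	       FIELD_PREP(FOO_CTRL_DIVIDER, div);
}

static unsigned int foo_get_divider(u32 ctrl)
{
	return FIELD_GET(FOO_CTRL_DIVIDER, ctrl);
}

/* the generated u32_replace_bits() takes the mask as a run-time argument */
static u32 foo_update_mode(u32 ctrl, unsigned int mode)
{
	return u32_replace_bits(ctrl, mode, FOO_CTRL_MODE);
}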
904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 /* SPDX-License-Identifier: GPL-2.0 */ /* * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). * * (C) SGI 2006, Christoph Lameter * Cleaned up and restructured to ease the addition of alternative * implementations of SLAB allocators. * (C) Linux Foundation 2008-2013 * Unified interface for all slab allocators */ #ifndef _LINUX_SLAB_H #define _LINUX_SLAB_H #include <linux/cache.h> #include <linux/gfp.h> #include <linux/overflow.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/percpu-refcount.h> #include <linux/cleanup.h> #include <linux/hash.h> enum _slab_flag_bits { _SLAB_CONSISTENCY_CHECKS, _SLAB_RED_ZONE, _SLAB_POISON, _SLAB_KMALLOC, _SLAB_HWCACHE_ALIGN, _SLAB_CACHE_DMA, _SLAB_CACHE_DMA32, _SLAB_STORE_USER, _SLAB_PANIC, _SLAB_TYPESAFE_BY_RCU, _SLAB_TRACE, #ifdef CONFIG_DEBUG_OBJECTS _SLAB_DEBUG_OBJECTS, #endif _SLAB_NOLEAKTRACE, _SLAB_NO_MERGE, #ifdef CONFIG_FAILSLAB _SLAB_FAILSLAB, #endif #ifdef CONFIG_MEMCG _SLAB_ACCOUNT, #endif #ifdef CONFIG_KASAN_GENERIC _SLAB_KASAN, #endif _SLAB_NO_USER_FLAGS, #ifdef CONFIG_KFENCE _SLAB_SKIP_KFENCE, #endif #ifndef CONFIG_SLUB_TINY _SLAB_RECLAIM_ACCOUNT, #endif _SLAB_OBJECT_POISON, _SLAB_CMPXCHG_DOUBLE, #ifdef CONFIG_SLAB_OBJ_EXT _SLAB_NO_OBJ_EXT, #endif _SLAB_FLAGS_LAST_BIT }; #define __SLAB_FLAG_BIT(nr) ((slab_flags_t __force)(1U << (nr))) #define __SLAB_FLAG_UNUSED ((slab_flags_t __force)(0U)) /* * Flags to pass to kmem_cache_create(). * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op */ /* DEBUG: Perform (expensive) checks on alloc/free */ #define SLAB_CONSISTENCY_CHECKS __SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS) /* DEBUG: Red zone objs in a cache */ #define SLAB_RED_ZONE __SLAB_FLAG_BIT(_SLAB_RED_ZONE) /* DEBUG: Poison objects */ #define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON) /* Indicate a kmalloc slab */ #define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC) /** * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries. * * Sufficiently large objects are aligned on cache line boundary. For object * size smaller than a half of cache line size, the alignment is on the half of * cache line size. In general, if object size is smaller than 1/2^n of cache * line size, the alignment is adjusted to 1/2^n. * * If explicit alignment is also requested by the respective * &struct kmem_cache_args field, the greater of both is alignments is applied. 
*/ #define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN) /* Use GFP_DMA memory */ #define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA) /* Use GFP_DMA32 memory */ #define SLAB_CACHE_DMA32 __SLAB_FLAG_BIT(_SLAB_CACHE_DMA32) /* DEBUG: Store the last owner for bug hunting */ #define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER) /* Panic if kmem_cache_create() fails */ #define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC) /** * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! * * This delays freeing the SLAB page by a grace period, it does _NOT_ * delay object freeing. This means that if you do kmem_cache_free() * that memory location is free to be reused at any time. Thus it may * be possible to see another object there in the same RCU grace period. * * This feature only ensures the memory location backing the object * stays valid, the trick to using this is relying on an independent * object validation pass. Something like: * * :: * * begin: * rcu_read_lock(); * obj = lockless_lookup(key); * if (obj) { * if (!try_get_ref(obj)) // might fail for free objects * rcu_read_unlock(); * goto begin; * * if (obj->key != key) { // not the object we expected * put_ref(obj); * rcu_read_unlock(); * goto begin; * } * } * rcu_read_unlock(); * * This is useful if we need to approach a kernel structure obliquely, * from its address obtained without the usual locking. We can lock * the structure to stabilize it and check it's still at the given address, * only if we can be sure that the memory has not been meanwhile reused * for some other kind of object (which our subsystem's lock might corrupt). * * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. * * Note that it is not possible to acquire a lock within a structure * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages * are not zeroed before being given to the slab, which means that any * locks must be initialized after each and every kmem_struct_alloc(). * Alternatively, make the ctor passed to kmem_cache_create() initialize * the locks at page-allocation time, as is done in __i915_request_ctor(), * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers * to safely acquire those ctor-initialized locks under rcu_read_lock() * protection. * * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. */ #define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU) /* Trace allocations and frees */ #define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE) /* Flag to prevent checks on free */ #ifdef CONFIG_DEBUG_OBJECTS # define SLAB_DEBUG_OBJECTS __SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS) #else # define SLAB_DEBUG_OBJECTS __SLAB_FLAG_UNUSED #endif /* Avoid kmemleak tracing */ #define SLAB_NOLEAKTRACE __SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE) /* * Prevent merging with compatible kmem caches. This flag should be used * cautiously. Valid use cases: * * - caches created for self-tests (e.g. 
kunit) * - general caches created and used by a subsystem, only when a * (subsystem-specific) debug option is enabled * - performance critical caches, should be very rare and consulted with slab * maintainers, and not used together with CONFIG_SLUB_TINY */ #define SLAB_NO_MERGE __SLAB_FLAG_BIT(_SLAB_NO_MERGE) /* Fault injection mark */ #ifdef CONFIG_FAILSLAB # define SLAB_FAILSLAB __SLAB_FLAG_BIT(_SLAB_FAILSLAB) #else # define SLAB_FAILSLAB __SLAB_FLAG_UNUSED #endif /** * define SLAB_ACCOUNT - Account allocations to memcg. * * All object allocations from this cache will be memcg accounted, regardless of * __GFP_ACCOUNT being or not being passed to individual allocations. */ #ifdef CONFIG_MEMCG # define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT) #else # define SLAB_ACCOUNT __SLAB_FLAG_UNUSED #endif #ifdef CONFIG_KASAN_GENERIC #define SLAB_KASAN __SLAB_FLAG_BIT(_SLAB_KASAN) #else #define SLAB_KASAN __SLAB_FLAG_UNUSED #endif /* * Ignore user specified debugging flags. * Intended for caches created for self-tests so they have only flags * specified in the code and other flags are ignored. */ #define SLAB_NO_USER_FLAGS __SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS) #ifdef CONFIG_KFENCE #define SLAB_SKIP_KFENCE __SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE) #else #define SLAB_SKIP_KFENCE __SLAB_FLAG_UNUSED #endif /* The following flags affect the page allocator grouping pages by mobility */ /** * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable. * * Use this flag for caches that have an associated shrinker. As a result, slab * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by * mobility, and are accounted in SReclaimable counter in /proc/meminfo */ #ifndef CONFIG_SLUB_TINY #define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT) #else #define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_UNUSED #endif #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ /* Slab created using create_boot_cache */ #ifdef CONFIG_SLAB_OBJ_EXT #define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT) #else #define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED #endif /* * freeptr_t represents a SLUB freelist pointer, which might be encoded * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. */ typedef struct { unsigned long v; } freeptr_t; /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. * * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. * Both make kfree a no-op. */ #define ZERO_SIZE_PTR ((void *)16) #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ (unsigned long)ZERO_SIZE_PTR) #include <linux/kasan.h> struct list_lru; struct mem_cgroup; /* * struct kmem_cache related prototypes */ bool slab_is_available(void); /** * struct kmem_cache_args - Less common arguments for kmem_cache_create() * * Any uninitialized fields of the structure are interpreted as unused. The * exception is @freeptr_offset where %0 is a valid value, so * @use_freeptr_offset must be also set to %true in order to interpret the field * as used. For @useroffset %0 is also valid, but only with non-%0 * @usersize. * * When %NULL args is passed to kmem_cache_create(), it is equivalent to all * fields unused. */ struct kmem_cache_args { /** * @align: The required alignment for the objects. * * %0 means no specific alignment is requested. */ unsigned int align; /** * @useroffset: Usercopy region offset. 
* * %0 is a valid offset, when @usersize is non-%0 */ unsigned int useroffset; /** * @usersize: Usercopy region size. * * %0 means no usercopy region is specified. */ unsigned int usersize; /** * @freeptr_offset: Custom offset for the free pointer * in &SLAB_TYPESAFE_BY_RCU caches * * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer * outside of the object. This might cause the object to grow in size. * Cache creators that have a reason to avoid this can specify a custom * free pointer offset in their struct where the free pointer will be * placed. * * Note that placing the free pointer inside the object requires the * caller to ensure that no fields are invalidated that are required to * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for * details). * * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset * is specified, %use_freeptr_offset must be set %true. * * Note that @ctor currently isn't supported with custom free pointers * as a @ctor requires an external free pointer. */ unsigned int freeptr_offset; /** * @use_freeptr_offset: Whether a @freeptr_offset is used. */ bool use_freeptr_offset; /** * @ctor: A constructor for the objects. * * The constructor is invoked for each object in a newly allocated slab * page. It is the cache user's responsibility to free object in the * same state as after calling the constructor, or deal appropriately * with any differences between a freshly constructed and a reallocated * object. * * %NULL means no constructor. */ void (*ctor)(void *); }; struct kmem_cache *__kmem_cache_create_args(const char *name, unsigned int object_size, struct kmem_cache_args *args, slab_flags_t flags); static inline struct kmem_cache * __kmem_cache_create(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *)) { struct kmem_cache_args kmem_args = { .align = align, .ctor = ctor, }; return __kmem_cache_create_args(name, size, &kmem_args, flags); } /** * kmem_cache_create_usercopy - Create a kmem cache with a region suitable * for copying to userspace. * @name: A string which is used in /proc/slabinfo to identify this cache. * @size: The size of objects to be created in this cache. * @align: The required alignment for the objects. * @flags: SLAB flags * @useroffset: Usercopy region offset * @usersize: Usercopy region size * @ctor: A constructor for the objects, or %NULL. * * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY() * if whitelisting a single field is sufficient, or kmem_cache_create() with * the necessary parameters passed via the args parameter (see * &struct kmem_cache_args) * * Return: a pointer to the cache on success, NULL on failure. */ static inline struct kmem_cache * kmem_cache_create_usercopy(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *)) { struct kmem_cache_args kmem_args = { .align = align, .ctor = ctor, .useroffset = useroffset, .usersize = usersize, }; return __kmem_cache_create_args(name, size, &kmem_args, flags); } /* If NULL is passed for @args, use this variant with default arguments. */ static inline struct kmem_cache * __kmem_cache_default_args(const char *name, unsigned int size, struct kmem_cache_args *args, slab_flags_t flags) { struct kmem_cache_args kmem_default_args = {}; /* Make sure we don't get passed garbage. 
*/ if (WARN_ON_ONCE(args)) return ERR_PTR(-EINVAL); return __kmem_cache_create_args(name, size, &kmem_default_args, flags); } /** * kmem_cache_create - Create a kmem cache. * @__name: A string which is used in /proc/slabinfo to identify this cache. * @__object_size: The size of objects to be created in this cache. * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL * means defaults will be used for all the arguments. * * This is currently implemented as a macro using ``_Generic()`` to call * either the new variant of the function, or a legacy one. * * The new variant has 4 parameters: * ``kmem_cache_create(name, object_size, args, flags)`` * * See __kmem_cache_create_args() which implements this. * * The legacy variant has 5 parameters: * ``kmem_cache_create(name, object_size, align, flags, ctor)`` * * The align and ctor parameters map to the respective fields of * &struct kmem_cache_args * * Context: Cannot be called within a interrupt, but can be interrupted. * * Return: a pointer to the cache on success, NULL on failure. */ #define kmem_cache_create(__name, __object_size, __args, ...) \ _Generic((__args), \ struct kmem_cache_args *: __kmem_cache_create_args, \ void *: __kmem_cache_default_args, \ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__) void kmem_cache_destroy(struct kmem_cache *s); int kmem_cache_shrink(struct kmem_cache *s); /* * Please use this macro to create slab caches. Simply specify the * name of the structure and maybe some flags that are listed above. * * The alignment of the struct determines object alignment. If you * f.e. add ____cacheline_aligned_in_smp to the struct declaration * then the objects will be properly aligned in SMP configurations. */ #define KMEM_CACHE(__struct, __flags) \ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \ &(struct kmem_cache_args) { \ .align = __alignof__(struct __struct), \ }, (__flags)) /* * To whitelist a single field for copying to/from usercopy, use this * macro instead for KMEM_CACHE() above. */ #define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \ &(struct kmem_cache_args) { \ .align = __alignof__(struct __struct), \ .useroffset = offsetof(struct __struct, __field), \ .usersize = sizeof_field(struct __struct, __field), \ }, (__flags)) /* * Common kmalloc functions provided by all allocators */ void * __must_check krealloc_noprof(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2); #define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__)) void kfree(const void *objp); void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) /** * ksize - Report actual allocation size of associated object * * @objp: Pointer returned from a prior kmalloc()-family allocation. * * This should not be used for writing beyond the originally requested * allocation size. Either use krealloc() or round up the allocation size * with kmalloc_size_roundup() prior to allocation. If this is used to * access beyond the originally requested allocation size, UBSAN_BOUNDS * and/or FORTIFY_SOURCE may trip, since they only know about the * originally allocated size via the __alloc_size attribute. 
*/ size_t ksize(const void *objp); #ifdef CONFIG_PRINTK bool kmem_dump_obj(void *object); #else static inline bool kmem_dump_obj(void *object) { return false; } #endif /* * Some archs want to perform DMA into kmalloc caches and need a guaranteed * alignment larger than the alignment of a 64-bit integer. * Setting ARCH_DMA_MINALIGN in arch headers allows that. */ #ifdef ARCH_HAS_DMA_MINALIGN #if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN) #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN #endif #endif #ifndef ARCH_KMALLOC_MINALIGN #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #elif ARCH_KMALLOC_MINALIGN > 8 #define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) #endif /* * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. * Intended for arches that get misalignment faults even for 64 bit integer * aligned buffers. */ #ifndef ARCH_SLAB_MINALIGN #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif /* * Arches can define this function if they want to decide the minimum slab * alignment at runtime. The value returned by the function must be a power * of two and >= ARCH_SLAB_MINALIGN. */ #ifndef arch_slab_minalign static inline unsigned int arch_slab_minalign(void) { return ARCH_SLAB_MINALIGN; } #endif /* * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN. * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment. */ #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) #define __assume_page_alignment __assume_aligned(PAGE_SIZE) /* * Kmalloc array related definitions */ /* * SLUB directly allocates requests fitting in to an order-1 page * (PAGE_SIZE*2). Larger requests are passed to the page allocator. */ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) #define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif /* Maximum allocatable size */ #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) /* Maximum size for which we actually use a slab cache */ #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH) /* Maximum order allocatable via the slab allocator */ #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT) /* * Kmalloc subsystem. */ #ifndef KMALLOC_MIN_SIZE #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW) #endif /* * This restriction comes from byte sized index implementation. * Page size is normally 2^12 bytes and, in this case, if we want to use * byte sized index which can represent 2^8 entries, the size of the object * should be equal or greater to 2^12 / 2^8 = 2^4 = 16. * If minimum size of kmalloc is less than 16, we use it as minimum object * size and give up to use byte sized index. */ #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ (KMALLOC_MIN_SIZE) : 16) #ifdef CONFIG_RANDOM_KMALLOC_CACHES #define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies #else #define RANDOM_KMALLOC_CACHES_NR 0 #endif /* * Whenever changing this, take care of that kmalloc_type() and * create_kmalloc_caches() still work as intended. * * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP * is for accounted but unreclaimable and non-dma objects. All the other * kmem caches can have both accounted and unaccounted objects. 
*/ enum kmalloc_cache_type { KMALLOC_NORMAL = 0, #ifndef CONFIG_ZONE_DMA KMALLOC_DMA = KMALLOC_NORMAL, #endif #ifndef CONFIG_MEMCG KMALLOC_CGROUP = KMALLOC_NORMAL, #endif KMALLOC_RANDOM_START = KMALLOC_NORMAL, KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR, #ifdef CONFIG_SLUB_TINY KMALLOC_RECLAIM = KMALLOC_NORMAL, #else KMALLOC_RECLAIM, #endif #ifdef CONFIG_ZONE_DMA KMALLOC_DMA, #endif #ifdef CONFIG_MEMCG KMALLOC_CGROUP, #endif NR_KMALLOC_TYPES }; typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1]; extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES]; /* * Define gfp bits that should not be set for KMALLOC_NORMAL. */ #define KMALLOC_NOT_NORMAL_BITS \ (__GFP_RECLAIMABLE | \ (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \ (IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0)) extern unsigned long random_kmalloc_seed; static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller) { /* * The most common case is KMALLOC_NORMAL, so test for it * with a single branch for all the relevant flags. */ if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0)) #ifdef CONFIG_RANDOM_KMALLOC_CACHES /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */ return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed, ilog2(RANDOM_KMALLOC_CACHES_NR + 1)); #else return KMALLOC_NORMAL; #endif /* * At least one of the flags has to be set. Their priorities in * decreasing order are: * 1) __GFP_DMA * 2) __GFP_RECLAIMABLE * 3) __GFP_ACCOUNT */ if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA)) return KMALLOC_DMA; if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE)) return KMALLOC_RECLAIM; else return KMALLOC_CGROUP; } /* * Figure out which kmalloc slab an allocation of a certain size * belongs to. * 0 = zero alloc * 1 = 65 .. 96 bytes * 2 = 129 .. 192 bytes * n = 2^(n-1)+1 .. 2^n * * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized; * typical usage is via kmalloc_index() and therefore evaluated at compile-time. * Callers where !size_is_constant should only be test modules, where runtime * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab(). */ static __always_inline unsigned int __kmalloc_index(size_t size, bool size_is_constant) { if (!size) return 0; if (size <= KMALLOC_MIN_SIZE) return KMALLOC_SHIFT_LOW; if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) return 1; if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) return 2; if (size <= 8) return 3; if (size <= 16) return 4; if (size <= 32) return 5; if (size <= 64) return 6; if (size <= 128) return 7; if (size <= 256) return 8; if (size <= 512) return 9; if (size <= 1024) return 10; if (size <= 2 * 1024) return 11; if (size <= 4 * 1024) return 12; if (size <= 8 * 1024) return 13; if (size <= 16 * 1024) return 14; if (size <= 32 * 1024) return 15; if (size <= 64 * 1024) return 16; if (size <= 128 * 1024) return 17; if (size <= 256 * 1024) return 18; if (size <= 512 * 1024) return 19; if (size <= 1024 * 1024) return 20; if (size <= 2 * 1024 * 1024) return 21; if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant) BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()"); else BUG(); /* Will never be reached. Needed because the compiler may complain */ return -1; } static_assert(PAGE_SHIFT <= 20); #define kmalloc_index(s) __kmalloc_index(s, true) #include <linux/alloc_tag.h> /** * kmem_cache_alloc - Allocate an object * @cachep: The cache to allocate from. * @flags: See kmalloc(). 
* * Allocate an object from this cache. * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags. * * Return: pointer to the new object or %NULL in case of error */ void *kmem_cache_alloc_noprof(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc; #define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__)) void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags) __assume_slab_alignment __malloc; #define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__)) /** * kmem_cache_charge - memcg charge an already allocated slab memory * @objp: address of the slab object to memcg charge * @gfpflags: describe the allocation context * * kmem_cache_charge allows charging a slab object to the current memcg, * primarily in cases where charging at allocation time might not be possible * because the target memcg is not known (i.e. softirq context) * * The objp should be pointer returned by the slab allocator functions like * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge * behavior can be controlled through gfpflags parameter, which affects how the * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes * that overcharging is requested instead of failure, but is not applied for the * internal metadata allocation. * * There are several cases where it will return true even if the charging was * not done: * More specifically: * * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems. * 2. Already charged slab objects. * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc() * without __GFP_ACCOUNT * 4. Allocating internal metadata has failed * * Return: true if charge was successful otherwise false. */ bool kmem_cache_charge(void *objp, gfp_t gfpflags); void kmem_cache_free(struct kmem_cache *s, void *objp); kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *)); /* * Bulk allocation and freeing operations. These are accelerated in an * allocator specific way to avoid taking locks repeatedly or building * metadata structures unnecessarily. * * Note that interrupts must be enabled when calling these functions. */ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p); int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p); #define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__)) static __always_inline void kfree_bulk(size_t size, void **p) { kmem_cache_free_bulk(NULL, size, p); } void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment __malloc; #define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__)) /* * These macros allow declaring a kmem_buckets * parameter alongside size, which * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call * sites don't have to pass NULL. 
*/ #ifdef CONFIG_SLAB_BUCKETS #define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b) #define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b) #define PASS_BUCKET_PARAM(_b) (_b) #else #define DECL_BUCKET_PARAMS(_size, _b) size_t (_size) #define PASS_BUCKET_PARAMS(_size, _b) (_size) #define PASS_BUCKET_PARAM(_b) NULL #endif /* * The following functions are not to be used directly and are intended only * for internal use from kmalloc() and kmalloc_node() * with the exception of kunit tests */ void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __assume_kmalloc_alignment __alloc_size(1); void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size) __assume_kmalloc_alignment __alloc_size(3); void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) __assume_kmalloc_alignment __alloc_size(4); void *__kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment __alloc_size(1); void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment __alloc_size(1); /** * kmalloc - allocate kernel memory * @size: how many bytes of memory are required. * @flags: describe the allocation context * * kmalloc is the normal method of allocating memory * for objects smaller than page size in the kernel. * * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN * bytes. For @size of power of two bytes, the alignment is also guaranteed * to be at least to the size. For other sizes, the alignment is guaranteed to * be at least the largest power-of-two divisor of @size. * * The @flags argument may be one of the GFP flags defined at * include/linux/gfp_types.h and described at * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>` * * The recommended usage of the @flags is described at * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>` * * Below is a brief outline of the most useful GFP flags * * %GFP_KERNEL * Allocate normal kernel ram. May sleep. * * %GFP_NOWAIT * Allocation will not sleep. * * %GFP_ATOMIC * Allocation will not sleep. May use emergency pools. * * Also it is possible to set different flags by OR'ing * in one or more of the following additional @flags: * * %__GFP_ZERO * Zero the allocated memory before returning. Also see kzalloc(). * * %__GFP_HIGH * This allocation has high priority and may use emergency pools. * * %__GFP_NOFAIL * Indicate that this allocation is in no way allowed to fail * (think twice before using). * * %__GFP_NORETRY * If memory is not immediately available, * then give up at once. * * %__GFP_NOWARN * If allocation fails, don't issue any warnings. * * %__GFP_RETRY_MAYFAIL * Try really hard to succeed the allocation but fail * eventually. */ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) return __kmalloc_large_noprof(size, flags); index = kmalloc_index(size); return __kmalloc_cache_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, size); } return __kmalloc_noprof(size, flags); } #define kmalloc(...) 
alloc_hooks(kmalloc_noprof(__VA_ARGS__)) #define kmem_buckets_alloc(_b, _size, _flags) \ alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE)) #define kmem_buckets_alloc_track_caller(_b, _size, _flags) \ alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_)) static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) return __kmalloc_large_node_noprof(size, flags, node); index = kmalloc_index(size); return __kmalloc_cache_node_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, node, size); } return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node); } #define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__)) /** * kmalloc_array - allocate memory for an array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) return kmalloc_noprof(bytes, flags); return kmalloc_noprof(bytes, flags); } #define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__)) /** * krealloc_array - reallocate memory for an array. * @p: pointer to the memory chunk to reallocate * @new_n: new number of elements to alloc * @new_size: new size of a single member of the array * @flags: the type of memory to allocate (see kmalloc) * * If __GFP_ZERO logic is requested, callers must ensure that, starting with the * initial memory allocation, every subsequent call to this API for the same * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that * __GFP_ZERO is not fully honored by this API. * * See krealloc_noprof() for further details. * * In any case, the contents of the object pointed to are preserved up to the * lesser of the new and old sizes. */ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p, size_t new_n, size_t new_size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) return NULL; return krealloc_noprof(p, bytes, flags); } #define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__)) /** * kcalloc - allocate memory for an array. The memory is set to zero. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ #define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO) void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node, unsigned long caller) __alloc_size(1); #define kmalloc_node_track_caller_noprof(size, flags, node, caller) \ __kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller) #define kmalloc_node_track_caller(...) \ alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_)) /* * kmalloc_track_caller is a special version of kmalloc that records the * calling function of the routine calling it for slab leak tracking instead * of just the calling function (confusing, eh?). * It's useful when the call to kmalloc comes from a widely-used standard * allocator where we care about the real place the memory allocation * request comes from. */ #define kmalloc_track_caller(...) 
kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE) #define kmalloc_track_caller_noprof(...) \ kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_) static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) return kmalloc_node_noprof(bytes, flags, node); return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node); } #define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__)) #define kcalloc_node(_n, _size, _flags, _node) \ kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node) /* * Shortcuts */ #define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). */ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags) { return kmalloc_noprof(size, flags | __GFP_ZERO); } #define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__)) #define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node) void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1); #define kvmalloc_node_noprof(size, flags, node) \ __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node) #define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__)) #define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE) #define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE) #define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO) #define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node) #define kmem_buckets_valloc(_b, _size, _flags) \ alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE)) static inline __alloc_size(1, 2) void * kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return kvmalloc_node_noprof(bytes, flags, node); } #define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE) #define kvcalloc_node_noprof(_n,_s,_f,_node) kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node) #define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE) #define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__)) #define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__)) #define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__)) void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags) __realloc_size(2); #define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__)) extern void kvfree(const void *addr); DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T)) extern void kvfree_sensitive(const void *addr, size_t len); unsigned int kmem_cache_size(struct kmem_cache *s); /** * kmalloc_size_roundup - Report allocation bucket size for the given size * * @size: Number of bytes to round up from. * * This returns the number of bytes that would be available in a kmalloc() * allocation of @size bytes. For example, a 126 byte request would be * rounded up to the next sized kmalloc bucket, 128 bytes. 
(This is strictly * for the general-purpose kmalloc()-based allocations, and is not for the * pre-sized kmem_cache_alloc()-based allocations.) * * Use this to kmalloc() the full bucket size ahead of time instead of using * ksize() to query the size after an allocation. */ size_t kmalloc_size_roundup(size_t size); void __init kmem_cache_init_late(void); void __init kvfree_rcu_init(void); #endif /* _LINUX_SLAB_H */
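To make the cache-creation interface above concrete, here is a minimal sketch of a cache user. It assumes a kernel build context; struct foo, the cache name, and the init/alloc/free helpers are hypothetical, and error handling is reduced to the essentials.

#include <linux/slab.h>
#include <linux/errno.h>

/* A hypothetical object type; only used to show the API shape. */
struct foo {
	int id;
	char name[32];
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	/*
	 * Passing a struct kmem_cache_args * selects the new four-argument
	 * variant of kmem_cache_create() via the _Generic() dispatch above;
	 * KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN) is the usual shorthand for
	 * this pattern.
	 */
	struct kmem_cache_args args = {
		.align = __alignof__(struct foo),
	};

	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      &args, SLAB_HWCACHE_ALIGN);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static struct foo *foo_alloc(void)
{
	/* kmem_cache_zalloc() is kmem_cache_alloc() with __GFP_ZERO added. */
	return kmem_cache_zalloc(foo_cache, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}

static void foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cache);
}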
// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ?
		now - rq->io_start_time_ns : 0;

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;
	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}
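As an illustration of how the callback machinery above is meant to be consumed, here is a sketch of a hypothetical block-layer user (blk-stat.h is a private header, so such code would live next to the file above). The bucket split, the names, and the pr_debug() reporting are invented, and arming the sampling window via the activation helpers declared in blk-stat.h is deliberately left out.

#include <linux/blk-mq.h>
#include <linux/printk.h>

#include "blk-stat.h"

enum { MY_READ_BUCKET, MY_WRITE_BUCKET, MY_NR_BUCKETS };

/* Map each completed request to a bucket; a negative return means "ignore". */
static int my_bucket_fn(const struct request *rq)
{
	return op_is_write(req_op(rq)) ? MY_WRITE_BUCKET : MY_READ_BUCKET;
}

/*
 * Called from blk_stat_timer_fn() after the per-CPU samples have been folded
 * into cb->stat[]; cb->data carries the pointer passed at allocation time.
 */
static void my_timer_fn(struct blk_stat_callback *cb)
{
	struct blk_rq_stat *reads = &cb->stat[MY_READ_BUCKET];

	pr_debug("reads: %llu samples, mean %llu ns\n",
		 (unsigned long long)reads->nr_samples,
		 (unsigned long long)reads->mean);
}

static struct blk_stat_callback *my_attach_stats(struct request_queue *q)
{
	struct blk_stat_callback *cb;

	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn,
				     MY_NR_BUCKETS, NULL);
	if (!cb)
		return NULL;

	/*
	 * Puts the callback on the queue's list and sets QUEUE_FLAG_STATS so
	 * blk_stat_add() starts iterating over it; samples only accumulate
	 * once the callback is activated, which is omitted here.
	 */
	blk_stat_add_callback(q, cb);
	return cb;
}

static void my_detach_stats(struct request_queue *q,
			    struct blk_stat_callback *cb)
{
	blk_stat_remove_callback(q, cb);
	blk_stat_free_callback(cb);	/* freed after an RCU grace period */
}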
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Lexar "Jumpshot" Compact Flash reader
 *
 * jumpshot driver v0.1:
 *
 * First release
 *
 * Current development and maintenance by:
 *   (c) 2000 Jimmie Mayfield (mayfield+usb@sackheads.org)
 *
 *   Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver
 *   which I used as a template for this driver.
 *
 *   Some bugfixes and scatter-gather code by Gregory P. Smith
 *   (greg-usb@electricrain.com)
 *
 *   Fix for media change by Joerg Schneider (js@joergschneider.com)
 *
 * Developed with the assistance of:
 *
 *   (C) 2002 Alan Stern <stern@rowland.org>
 */

/*
 * This driver attempts to support the Lexar Jumpshot USB CompactFlash
 * reader.  Like many other USB CompactFlash readers, the Jumpshot contains
 * a USB-to-ATA chip.
 *
 * This driver supports reading and writing.
If you're truly paranoid, * however, you can force the driver into a write-protected state by setting * the WP enable bits in jumpshot_handle_mode_sense. See the comments * in that routine. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-jumpshot" MODULE_DESCRIPTION("Driver for Lexar \"Jumpshot\" Compact Flash reader"); MODULE_AUTHOR("Jimmie Mayfield <mayfield+usb@sackheads.org>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id jumpshot_usb_ids[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev jumpshot_unusual_dev_list[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV struct jumpshot_info { unsigned long sectors; /* total sector count */ unsigned long ssize; /* sector size in bytes */ /* the following aren't used yet */ unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ }; static inline int jumpshot_bulk_read(struct us_data *us, unsigned char *data, unsigned int len) { if (len == 0) return USB_STOR_XFER_GOOD; usb_stor_dbg(us, "len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, len, NULL); } static inline int jumpshot_bulk_write(struct us_data *us, unsigned char *data, unsigned int len) { if (len == 0) return USB_STOR_XFER_GOOD; usb_stor_dbg(us, "len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, data, len, NULL); } static int jumpshot_get_status(struct us_data *us) { int rc; if (!us) return USB_STOR_TRANSPORT_ERROR; // send the setup rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, 0, 0xA0, 0, 7, us->iobuf, 1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (us->iobuf[0] != 0x50) { usb_stor_dbg(us, "0x%2x\n", us->iobuf[0]); return USB_STOR_TRANSPORT_ERROR; } return USB_STOR_TRANSPORT_GOOD; } static int jumpshot_read_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Jumpshot // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. 
if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; // Since we don't read more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 | ((sector >> 24) & 0x0F); command[6] = 0x20; // send the setup + command result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 1, command, 7); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result result = jumpshot_bulk_read(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; usb_stor_dbg(us, "%d bytes\n", len); // Store the data in the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, TO_XFER_BUF); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int jumpshot_write_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result, waitcount; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Jumpshot // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. // if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; // Since we don't write more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; // Get the data from the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, FROM_XFER_BUF); command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 | ((sector >> 24) & 0x0F); command[6] = 0x30; // send the setup + command result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 1, command, 7); if (result != USB_STOR_XFER_GOOD) goto leave; // send the data result = jumpshot_bulk_write(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result. apparently the bulk write can complete // before the jumpshot drive is finished writing. so we loop // here until we get a good return code waitcount = 0; do { result = jumpshot_get_status(us); if (result != USB_STOR_TRANSPORT_GOOD) { // I have not experimented to find the smallest value. // msleep(50); } } while ((result != USB_STOR_TRANSPORT_GOOD) && (waitcount < 10)); if (result != USB_STOR_TRANSPORT_GOOD) usb_stor_dbg(us, "Gah! 
Waitcount = 10. Bad write!?\n"); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return result; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int jumpshot_id_device(struct us_data *us, struct jumpshot_info *info) { unsigned char *command = us->iobuf; unsigned char *reply; int rc; if (!info) return USB_STOR_TRANSPORT_ERROR; command[0] = 0xE0; command[1] = 0xEC; reply = kmalloc(512, GFP_NOIO); if (!reply) return USB_STOR_TRANSPORT_ERROR; // send the setup rc = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 0, 0x20, 0, 6, command, 2); if (rc != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Gah! send_control for read_capacity failed\n"); rc = USB_STOR_TRANSPORT_ERROR; goto leave; } // read the reply rc = jumpshot_bulk_read(us, reply, 512); if (rc != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } info->sectors = ((u32)(reply[117]) << 24) | ((u32)(reply[116]) << 16) | ((u32)(reply[115]) << 8) | ((u32)(reply[114]) ); rc = USB_STOR_TRANSPORT_GOOD; leave: kfree(reply); return rc; } static int jumpshot_handle_mode_sense(struct us_data *us, struct scsi_cmnd * srb, int sense_6) { static unsigned char rw_err_page[12] = { 0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0 }; static unsigned char cache_page[12] = { 0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static unsigned char rbac_page[12] = { 0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0 }; static unsigned char timer_page[8] = { 0x1C, 0x6, 0, 0, 0, 0 }; unsigned char pc, page_code; unsigned int i = 0; struct jumpshot_info *info = (struct jumpshot_info *) (us->extra); unsigned char *ptr = us->iobuf; pc = srb->cmnd[2] >> 6; page_code = srb->cmnd[2] & 0x3F; switch (pc) { case 0x0: usb_stor_dbg(us, "Current values\n"); break; case 0x1: usb_stor_dbg(us, "Changeable values\n"); break; case 0x2: usb_stor_dbg(us, "Default values\n"); break; case 0x3: usb_stor_dbg(us, "Saves values\n"); break; } memset(ptr, 0, 8); if (sense_6) { ptr[2] = 0x00; // WP enable: 0x80 i = 4; } else { ptr[3] = 0x00; // WP enable: 0x80 i = 8; } switch (page_code) { case 0x0: // vendor-specific mode info->sense_key = 0x05; info->sense_asc = 0x24; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; case 0x1: memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; case 0x8: memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); break; case 0x1B: memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); break; case 0x1C: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); break; case 0x3F: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; } if (sense_6) ptr[0] = i - 1; else ((__be16 *) ptr)[0] = cpu_to_be16(i - 2); usb_stor_set_xfer_buf(ptr, i, srb); return USB_STOR_TRANSPORT_GOOD; } static void jumpshot_info_destructor(void *extra) { // this routine is a placeholder... 
// currently, we don't allocate any extra blocks so we're okay } // Transport for the Lexar 'Jumpshot' // static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us) { struct jumpshot_info *info; int rc; unsigned long block, blocks; unsigned char *ptr = us->iobuf; static unsigned char inquiry_response[8] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (!us->extra) { us->extra = kzalloc(sizeof(struct jumpshot_info), GFP_NOIO); if (!us->extra) return USB_STOR_TRANSPORT_ERROR; us->extra_destructor = jumpshot_info_destructor; } info = (struct jumpshot_info *) (us->extra); if (srb->cmnd[0] == INQUIRY) { usb_stor_dbg(us, "INQUIRY - Returning bogus response\n"); memcpy(ptr, inquiry_response, sizeof(inquiry_response)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_CAPACITY) { info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec rc = jumpshot_get_status(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; rc = jumpshot_id_device(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n", info->sectors, info->ssize); // build the reply // ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); ((__be32 *) ptr)[1] = cpu_to_be32(info->ssize); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SELECT_10) { usb_stor_dbg(us, "Gah! MODE_SELECT_10\n"); return USB_STOR_TRANSPORT_ERROR; } if (srb->cmnd[0] == READ_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); usb_stor_dbg(us, "READ_10: read block 0x%04lx count %ld\n", block, blocks); return jumpshot_read_data(us, info, block, blocks); } if (srb->cmnd[0] == READ_12) { // I don't think we'll ever see a READ_12 but support it anyway... // block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); usb_stor_dbg(us, "READ_12: read block 0x%04lx count %ld\n", block, blocks); return jumpshot_read_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); usb_stor_dbg(us, "WRITE_10: write block 0x%04lx count %ld\n", block, blocks); return jumpshot_write_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_12) { // I don't think we'll ever see a WRITE_12 but support it anyway... 
// block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); usb_stor_dbg(us, "WRITE_12: write block 0x%04lx count %ld\n", block, blocks); return jumpshot_write_data(us, info, block, blocks); } if (srb->cmnd[0] == TEST_UNIT_READY) { usb_stor_dbg(us, "TEST_UNIT_READY\n"); return jumpshot_get_status(us); } if (srb->cmnd[0] == REQUEST_SENSE) { usb_stor_dbg(us, "REQUEST_SENSE\n"); memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SENSE) { usb_stor_dbg(us, "MODE_SENSE_6 detected\n"); return jumpshot_handle_mode_sense(us, srb, 1); } if (srb->cmnd[0] == MODE_SENSE_10) { usb_stor_dbg(us, "MODE_SENSE_10 detected\n"); return jumpshot_handle_mode_sense(us, srb, 0); } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { /* * sure. whatever. not like we can stop the user from popping * the media out of the device (no locking doors, etc) */ return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == START_STOP) { /* * this is used by sd.c'check_scsidisk_media_change to detect * media change */ usb_stor_dbg(us, "START_STOP\n"); /* * the first jumpshot_id_device after a media change returns * an error (determined experimentally) */ rc = jumpshot_id_device(us, info); if (rc == USB_STOR_TRANSPORT_GOOD) { info->sense_key = NO_SENSE; srb->result = SUCCESS; } else { info->sense_key = UNIT_ATTENTION; srb->result = SAM_STAT_CHECK_CONDITION; } return rc; } usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static struct scsi_host_template jumpshot_host_template; static int jumpshot_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - jumpshot_usb_ids) + jumpshot_unusual_dev_list, &jumpshot_host_template); if (result) return result; us->transport_name = "Lexar Jumpshot Control/Bulk"; us->transport = jumpshot_transport; us->transport_reset = usb_stor_Bulk_reset; us->max_lun = 1; result = usb_stor_probe2(us); return result; } static struct usb_driver jumpshot_driver = { .name = DRV_NAME, .probe = jumpshot_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = jumpshot_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(jumpshot_driver, jumpshot_host_template, DRV_NAME);
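The transport above decodes big-endian LBA and length fields out of the SCSI CDB and repacks them into the seven command bytes that jumpshot_read_data()/jumpshot_write_data() push over the control pipe. Below is a minimal stand-alone sketch of that decoding and packing; it is not part of the driver, and the sample CDB values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example READ_10 CDB: opcode 0x28, LBA 0x00012345, 8 blocks (made up) */
	uint8_t cdb[10] = { 0x28, 0, 0x00, 0x01, 0x23, 0x45, 0, 0x00, 0x08, 0 };
	uint32_t block, blocks;
	uint8_t command[7];

	/* Same big-endian extraction as jumpshot_transport() does for READ_10 */
	block = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
		((uint32_t)cdb[4] << 8) | cdb[5];
	blocks = ((uint32_t)cdb[7] << 8) | cdb[8];

	/* Same packing as jumpshot_read_data(): 28-bit LBA in bytes 2..5 */
	command[0] = 0;
	command[1] = blocks & 0xff;		/* sectors for this transfer */
	command[2] = block & 0xFF;		/* LBA bits 7:0 */
	command[3] = (block >> 8) & 0xFF;	/* LBA bits 15:8 */
	command[4] = (block >> 16) & 0xFF;	/* LBA bits 23:16 */
	command[5] = 0xE0 | ((block >> 24) & 0x0F); /* LBA bits 27:24 + LBA-mode bits */
	command[6] = 0x20;			/* 0x20 for a read, 0x30 for a write */

	printf("LBA=%u count=%u cmd[5]=0x%02x\n", block, blocks, command[5]);
	return 0;
}

Byte 5 carries the top nibble of the 28-bit LBA together with the 0xE0 LBA-mode bits, and byte 6 selects the command, which corresponds to the ATA READ SECTORS (0x20) and WRITE SECTORS (0x30) opcodes.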
// SPDX-License-Identifier: GPL-2.0-or-later /* * ALSA sequencer device management * Copyright (c) 1999 by Takashi Iwai <tiwai@suse.de> * *---------------------------------------------------------------- * * This device handler separates the card driver module from sequencer * stuff (sequencer core, synth drivers, etc), so that user can avoid * to spend unnecessary resources e.g. if he needs only listening to * MP3s. * * The card (or lowlevel) driver creates a sequencer device entry * via snd_seq_device_new(). This is an entry pointer to communicate * with the sequencer device "driver", which is involved with the * actual part to communicate with the sequencer core. * Each sequencer device entry has an id string and the corresponding * driver with the same id is loaded when required. For example, * lowlevel codes to access emu8000 chip on sbawe card are included in * emu8000-synth module. To activate this module, the hardware * resources like i/o port are passed via snd_seq_device argument. */ #include <linux/device.h> #include <linux/init.h> #include <linux/module.h> #include <sound/core.h> #include <sound/info.h> #include <sound/seq_device.h> #include <sound/seq_kernel.h> #include <sound/initval.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/mutex.h> MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA sequencer device management"); MODULE_LICENSE("GPL"); /* * bus definition */ static int snd_seq_bus_match(struct device *dev, const struct device_driver *drv) { struct snd_seq_device *sdev = to_seq_dev(dev); struct snd_seq_driver *sdrv = to_seq_drv(drv); return strcmp(sdrv->id, sdev->id) == 0 && sdrv->argsize == sdev->argsize; } static const struct bus_type snd_seq_bus_type = { .name = "snd_seq", .match = snd_seq_bus_match, }; /* * proc interface -- just for compatibility */ #ifdef CONFIG_SND_PROC_FS static struct snd_info_entry *info_entry; static int print_dev_info(struct device *dev, void *data) { struct snd_seq_device *sdev = to_seq_dev(dev); struct snd_info_buffer *buffer = data; snd_iprintf(buffer, "snd-%s,%s,%d\n", sdev->id, dev->driver ? "loaded" : "empty", dev->driver ?
1 : 0); return 0; } static void snd_seq_device_info(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { bus_for_each_dev(&snd_seq_bus_type, NULL, buffer, print_dev_info); } #endif /* * load all registered drivers (called from seq_clientmgr.c) */ #ifdef CONFIG_MODULES /* flag to block auto-loading */ static atomic_t snd_seq_in_init = ATOMIC_INIT(1); /* blocked as default */ static int request_seq_drv(struct device *dev, void *data) { struct snd_seq_device *sdev = to_seq_dev(dev); if (!dev->driver) request_module("snd-%s", sdev->id); return 0; } static void autoload_drivers(struct work_struct *work) { /* avoid reentrance */ if (atomic_inc_return(&snd_seq_in_init) == 1) bus_for_each_dev(&snd_seq_bus_type, NULL, NULL, request_seq_drv); atomic_dec(&snd_seq_in_init); } static DECLARE_WORK(autoload_work, autoload_drivers); static void queue_autoload_drivers(void) { schedule_work(&autoload_work); } void snd_seq_autoload_init(void) { atomic_dec(&snd_seq_in_init); #ifdef CONFIG_SND_SEQUENCER_MODULE /* initial autoload only when snd-seq is a module */ queue_autoload_drivers(); #endif } EXPORT_SYMBOL(snd_seq_autoload_init); void snd_seq_autoload_exit(void) { atomic_inc(&snd_seq_in_init); } EXPORT_SYMBOL(snd_seq_autoload_exit); void snd_seq_device_load_drivers(void) { queue_autoload_drivers(); flush_work(&autoload_work); } EXPORT_SYMBOL(snd_seq_device_load_drivers); static inline void cancel_autoload_drivers(void) { cancel_work_sync(&autoload_work); } #else static inline void queue_autoload_drivers(void) { } static inline void cancel_autoload_drivers(void) { } #endif /* * device management */ static int snd_seq_device_dev_free(struct snd_device *device) { struct snd_seq_device *dev = device->device_data; cancel_autoload_drivers(); if (dev->private_free) dev->private_free(dev); put_device(&dev->dev); return 0; } static int snd_seq_device_dev_register(struct snd_device *device) { struct snd_seq_device *dev = device->device_data; int err; err = device_add(&dev->dev); if (err < 0) return err; if (!dev->dev.driver) queue_autoload_drivers(); return 0; } static int snd_seq_device_dev_disconnect(struct snd_device *device) { struct snd_seq_device *dev = device->device_data; device_del(&dev->dev); return 0; } static void snd_seq_dev_release(struct device *dev) { kfree(to_seq_dev(dev)); } /* * register a sequencer device * card = card info * device = device number (if any) * id = id of driver * result = return pointer (NULL allowed if unnecessary) */ int snd_seq_device_new(struct snd_card *card, int device, const char *id, int argsize, struct snd_seq_device **result) { struct snd_seq_device *dev; int err; static const struct snd_device_ops dops = { .dev_free = snd_seq_device_dev_free, .dev_register = snd_seq_device_dev_register, .dev_disconnect = snd_seq_device_dev_disconnect, }; if (result) *result = NULL; if (snd_BUG_ON(!id)) return -EINVAL; dev = kzalloc(sizeof(*dev) + argsize, GFP_KERNEL); if (!dev) return -ENOMEM; /* set up device info */ dev->card = card; dev->device = device; dev->id = id; dev->argsize = argsize; device_initialize(&dev->dev); dev->dev.parent = &card->card_dev; dev->dev.bus = &snd_seq_bus_type; dev->dev.release = snd_seq_dev_release; dev_set_name(&dev->dev, "%s-%d-%d", dev->id, card->number, device); /* add this device to the list */ err = snd_device_new(card, SNDRV_DEV_SEQUENCER, dev, &dops); if (err < 0) { put_device(&dev->dev); return err; } if (result) *result = dev; return 0; } EXPORT_SYMBOL(snd_seq_device_new); /* * driver registration */ int 
__snd_seq_driver_register(struct snd_seq_driver *drv, struct module *mod) { if (WARN_ON(!drv->driver.name || !drv->id)) return -EINVAL; drv->driver.bus = &snd_seq_bus_type; drv->driver.owner = mod; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(__snd_seq_driver_register); void snd_seq_driver_unregister(struct snd_seq_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(snd_seq_driver_unregister); /* * module part */ static int __init seq_dev_proc_init(void) { #ifdef CONFIG_SND_PROC_FS info_entry = snd_info_create_module_entry(THIS_MODULE, "drivers", snd_seq_root); if (info_entry == NULL) return -ENOMEM; info_entry->content = SNDRV_INFO_CONTENT_TEXT; info_entry->c.text.read = snd_seq_device_info; if (snd_info_register(info_entry) < 0) { snd_info_free_entry(info_entry); return -ENOMEM; } #endif return 0; } static int __init alsa_seq_device_init(void) { int err; err = bus_register(&snd_seq_bus_type); if (err < 0) return err; err = seq_dev_proc_init(); if (err < 0) bus_unregister(&snd_seq_bus_type); return err; } static void __exit alsa_seq_device_exit(void) { #ifdef CONFIG_MODULES cancel_work_sync(&autoload_work); #endif #ifdef CONFIG_SND_PROC_FS snd_info_free_entry(info_entry); #endif bus_unregister(&snd_seq_bus_type); } subsys_initcall(alsa_seq_device_init) module_exit(alsa_seq_device_exit)
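Per the header comment in this file, a card or lowlevel driver creates the sequencer device entry with snd_seq_device_new(), and a driver module registered with the same id and argsize later binds to it through snd_seq_bus_match(). The following is a hedged sketch of the card-driver side only: the "my-synth" id, struct my_synth_args, and my_card_attach_seq() are hypothetical names, and it assumes the SNDRV_SEQ_DEVICE_ARGPTR() accessor from <sound/seq_device.h> for reaching the per-device argument block.

#include <sound/core.h>
#include <sound/seq_device.h>

/* Hypothetical argument block handed to a matching snd_seq driver. */
struct my_synth_args {
	void __iomem *port;
};

static int my_card_attach_seq(struct snd_card *card, void __iomem *port)
{
	struct snd_seq_device *sdev;
	struct my_synth_args *args;
	int err;

	/* Create device 0 with id "my-synth"; a driver with the same
	 * id and argsize will be autoloaded/bound by the snd_seq bus. */
	err = snd_seq_device_new(card, 0, "my-synth",
				 sizeof(struct my_synth_args), &sdev);
	if (err < 0)
		return err;

	/* Argument block lives immediately after struct snd_seq_device
	 * (assumption: SNDRV_SEQ_DEVICE_ARGPTR() provides that pointer). */
	args = SNDRV_SEQ_DEVICE_ARGPTR(sdev);
	args->port = port;
	return 0;
}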
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - * http://lse.sourceforge.net/locking/rcupdate.html * */ #ifndef __LINUX_RCUPDATE_H #define __LINUX_RCUPDATE_H #include <linux/types.h> #include <linux/compiler.h> #include <linux/atomic.h> #include <linux/irqflags.h> #include <linux/preempt.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/processor.h> #include <linux/context_tracking_irq.h> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) #define RCU_SEQ_CTR_SHIFT 2 #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); void synchronize_rcu(void); struct rcu_gp_oldstate; unsigned long get_completed_synchronize_rcu(void); void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp); // Maximum number of unsigned long values corresponding to // not-yet-completed RCU grace periods. #define NUM_ACTIVE_RCU_POLL_OLDSTATE 2 /** * same_state_synchronize_rcu - Are two old-state values identical? * @oldstate1: First old-state value. * @oldstate2: Second old-state value. * * The two old-state values must have been obtained from either * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or * get_completed_synchronize_rcu(). Returns @true if the two values are * identical and @false otherwise. This allows structures whose lifetimes * are tracked by old-state values to push these values to a list header, * allowing those structures to be slightly smaller.
*/ static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2) { return oldstate1 == oldstate2; } #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); void __rcu_read_unlock(void); /* * Defined as a macro as it is a very low level header included from * areas that don't even know about current. This gives the rcu_read_lock() * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TINY_RCU #define rcu_read_unlock_strict() do { } while (0) #else void rcu_read_unlock_strict(void); #endif static inline void __rcu_read_lock(void) { preempt_disable(); } static inline void __rcu_read_unlock(void) { preempt_enable(); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) rcu_read_unlock_strict(); } static inline int rcu_preempt_depth(void) { return 0; } #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_RCU_LAZY void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func); #else static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } #endif /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); #ifdef CONFIG_TASKS_RCU_GENERIC void rcu_init_tasks_generic(void); #else static inline void rcu_init_tasks_generic(void) { } #endif #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); #else /* #ifdef CONFIG_RCU_STALL_COMMON */ static inline void rcu_sysrq_start(void) { } static inline void rcu_sysrq_end(void) { } #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else static inline void rcu_irq_work_resched(void) { } #endif #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); void rcu_nocb_flush_deferred_wakeup(void); #define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s) #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } static inline void rcu_nocb_flush_deferred_wakeup(void) { } #define RCU_NOCB_LOCKDEP_WARN(c, s) #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /* * Note a quasi-voluntary context switch for RCU-tasks's benefit. * This is a macro rather than an inline function to avoid #include hell. */ #ifdef CONFIG_TASKS_RCU_GENERIC # ifdef CONFIG_TASKS_RCU # define rcu_tasks_classic_qs(t, preempt) \ do { \ if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void rcu_tasks_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_classic_qs(t, preempt) do { } while (0) # define call_rcu_tasks call_rcu # define synchronize_rcu_tasks synchronize_rcu # endif # ifdef CONFIG_TASKS_TRACE_RCU // Bits for ->trc_reader_special.b.need_qs field. #define TRC_NEED_QS 0x1 // Task needs a quiescent state. #define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state. 
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new); void rcu_tasks_trace_qs_blkd(struct task_struct *t); # define rcu_tasks_trace_qs(t) \ do { \ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \ \ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \ likely(!___rttq_nesting)) { \ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \ rcu_tasks_trace_qs_blkd(t); \ } \ } while (0) void rcu_tasks_trace_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_trace_qs(t) do { } while (0) # endif #define rcu_tasks_qs(t, preempt) \ do { \ rcu_tasks_classic_qs((t), (preempt)); \ rcu_tasks_trace_qs(t); \ } while (0) # ifdef CONFIG_TASKS_RUDE_RCU void synchronize_rcu_tasks_rude(void); void rcu_tasks_rude_torture_stats_print(char *tt, char *tf); # endif #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_classic_qs(t, preempt) do { } while (0) #define rcu_tasks_qs(t, preempt) do { } while (0) #define rcu_note_voluntary_context_switch(t) do { } while (0) #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? * * As an accident of implementation, an RCU Tasks Trace grace period also * acts as an RCU grace period. However, this could change at any time. * Code relying on this accident must call this function to verify that * this accident is still happening. * * You have been warned! */ static inline bool rcu_trace_implies_rcu_gp(void) { return true; } /** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to * report potential quiescent states to RCU-tasks even if the cond_resched() * machinery were to be shut off, as some advocate for PREEMPTION kernels. */ #define cond_resched_tasks_rcu_qs() \ do { \ rcu_tasks_qs(current, false); \ cond_resched(); \ } while (0) /** * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states * @old_ts: jiffies at start of processing. * * This helper is for long-running softirq handlers, such as NAPI threads in * networking. The caller should initialize the variable passed in as @old_ts * at the beginning of the softirq handler. When invoked frequently, this macro * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will * provide both RCU and RCU-Tasks quiescent states. Note that this macro * modifies its old_ts argument. * * Because regions of code that have disabled softirq act as RCU read-side * critical sections, this macro should be invoked with softirq (and * preemption) enabled. * * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would * have more chance to invoke schedule() calls and provide necessary quiescent * states. As a contrast, calling cond_resched() only won't achieve the same * effect because cond_resched() does not provide RCU-Tasks quiescent states. 
*/ #define rcu_softirq_qs_periodic(old_ts) \ do { \ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \ time_after(jiffies, (old_ts) + HZ / 10)) { \ preempt_disable(); \ rcu_softirq_qs(); \ preempt_enable(); \ (old_ts) = jiffies; \ } \ } while (0) /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ #if defined(CONFIG_TREE_RCU) #include <linux/rcutree.h> #elif defined(CONFIG_TINY_RCU) #include <linux/rcutiny.h> #else #error "Unknown RCU implementation specified to kernel configuration" #endif /* * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls * are needed for dynamic initialization and destruction of rcu_head * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for * dynamic initialization and destruction of statically allocated rcu_head * structures. However, rcu_head structures allocated dynamically in the * heap don't need any initialization. */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head); void destroy_rcu_head(struct rcu_head *head); void init_rcu_head_on_stack(struct rcu_head *head); void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ static inline void init_rcu_head(struct rcu_head *head) { } static inline void destroy_rcu_head(struct rcu_head *head) { } static inline void init_rcu_head_on_stack(struct rcu_head *head) { } static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ static inline bool rcu_lockdep_current_cpu_online(void) { return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_bh_lock_map; extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); } static inline void rcu_try_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_); } static inline void rcu_lock_release(struct lockdep_map *map) { lock_release(map, _THIS_IP_); } int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); int rcu_read_lock_sched_held(void); int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ # define rcu_lock_acquire(a) do { } while (0) # define rcu_try_lock_acquire(a) do { } while (0) # define rcu_lock_release(a) do { } while (0) static inline int rcu_read_lock_held(void) { return 1; } static inline int rcu_read_lock_bh_held(void) { return 1; } static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } static inline int rcu_read_lock_any_held(void) { return !preemptible(); } static inline int debug_lockdep_rcu_enabled(void) { return 0; } #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU /** * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message * * This checks debug_lockdep_rcu_enabled() before checking (c) to * prevent early boot splats due to lockdep not yet being initialized, * and rechecks it after checking (c) to prevent false-positive splats * due to races with lockdep being disabled. 
See commit 3066820034b5dd * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail. */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data..unlikely") __warned; \ if (debug_lockdep_rcu_enabled() && (c) && \ debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ } while (0) #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal context switch in RCU read-side critical section"); } #else // #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { } #endif // #else // #ifndef CONFIG_PREEMPT_RCU #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ } while (0) // See RCU_LOCKDEP_WARN() for an explanation of the double call to // debug_lockdep_rcu_enabled(). static inline bool lockdep_assert_rcu_helper(bool c) { return debug_lockdep_rcu_enabled() && (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) && debug_lockdep_rcu_enabled(); } /** * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock() * * Splats if lockdep is enabled and there is no rcu_read_lock() in effect. */ #define lockdep_assert_in_rcu_read_lock() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map))) /** * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh() * * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect. * Note that local_bh_disable() and friends do not suffice here, instead an * actual rcu_read_lock_bh() is required. */ #define lockdep_assert_in_rcu_read_lock_bh() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map))) /** * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched() * * Splats if lockdep is enabled and there is no rcu_read_lock_sched() * in effect. Note that preempt_disable() and friends do not suffice here, * instead an actual rcu_read_lock_sched() is required. */ #define lockdep_assert_in_rcu_read_lock_sched() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map))) /** * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader * * Splats if lockdep is enabled and there is no RCU reader of any * type in effect. Note that regions of code protected by things like * preempt_disable, local_bh_disable(), and local_irq_disable() all qualify * as RCU readers. * * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY * kernels that are not also built with PREEMPT_COUNT. But if you have * lockdep enabled, you might as well also enable PREEMPT_COUNT. 
*/ #define lockdep_assert_in_rcu_reader() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \ !lock_is_held(&rcu_bh_lock_map) && \ !lock_is_held(&rcu_sched_lock_map) && \ preemptible())) #else /* #ifdef CONFIG_PROVE_RCU */ #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) #define rcu_sleep_check() do { } while (0) #define lockdep_assert_in_rcu_read_lock() do { } while (0) #define lockdep_assert_in_rcu_read_lock_bh() do { } while (0) #define lockdep_assert_in_rcu_read_lock_sched() do { } while (0) #define lockdep_assert_in_rcu_reader() do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ /* * Helper functions for rcu_dereference_check(), rcu_dereference_protected() * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of * multiple pointers markings to match different RCU implementations * (e.g., __srcu), should this make sense in the future. */ #ifdef __CHECKER__ #define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __unrcu_pointer(p, local) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)(p); \ rcu_check_sparse(p, __rcu); \ ((typeof(*p) __force __kernel *)(local)); \ }) /** * unrcu_pointer - mark a pointer as not being RCU protected * @p: pointer needing to lose its __rcu property * * Converts @p from an __rcu pointer to a __kernel pointer. * This allows an __rcu pointer to be used with xchg() and friends. */ #define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu)) #define __rcu_access_pointer(p, local, space) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_check(p, local, c, space) \ ({ \ /* Dependency order vs. p above. */ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_protected(p, local, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define __rcu_dereference_raw(p, local) \ ({ \ /* Dependency order vs. p above. */ \ typeof(p) local = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu)) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. */ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) /** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to * @v: value to assign (publish) * * Assigns the specified value to the specified RCU-protected * pointer, ensuring that any concurrent RCU readers will see * any prior initialization. * * Inserts memory barriers on architectures that require them * (which is most of them), and also prevents the compiler from * reordering the code that initializes the structure after the pointer * assignment. More importantly, this call documents which pointers * will be dereferenced by RCU read-side code. * * In some special cases, you may use RCU_INIT_POINTER() instead * of rcu_assign_pointer(). 
RCU_INIT_POINTER() is a bit faster due * to the fact that it does not constrain either the CPU or the compiler. * That said, using RCU_INIT_POINTER() when you should have used * rcu_assign_pointer() is a very bad thing that results in * impossible-to-diagnose memory corruption. So please be careful. * See the RCU_INIT_POINTER() comment header for details. * * Note that rcu_assign_pointer() evaluates each of its arguments only * once, appearances notwithstanding. One of the "extra" evaluations * is in typeof() and the other visible only to sparse (__CHECKER__), * neither of which actually execute the argument. As with most cpp * macros, this execute-arguments-only-once property is important, so * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ } while (0) /** * rcu_replace_pointer() - replace an RCU pointer, returning its old value * @rcu_ptr: RCU pointer, whose old value is returned * @ptr: regular pointer * @c: the lockdep conditions under which the dereference will take place * * Perform a replacement, where @rcu_ptr is an RCU-annotated * pointer and @c is the lockdep argument that is passed to the * rcu_dereference_protected() call used to read that pointer. The old * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. */ #define rcu_replace_pointer(rcu_ptr, ptr, c) \ ({ \ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ rcu_assign_pointer((rcu_ptr), (ptr)); \ __tmp; \ }) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the * lockdep checks for being in an RCU read-side critical section. This is * useful when the value of this pointer is accessed, but the pointer is * not dereferenced, for example, when testing an RCU-protected pointer * against NULL. Although rcu_access_pointer() may also be used in cases * where update-side locks prevent the value of the pointer from changing, * you should instead use rcu_dereference_protected() for this use case. * Within an RCU read-side critical section, there is little reason to * use rcu_access_pointer(). * * It is usually best to test the rcu_access_pointer() return value * directly in order to avoid accidental dereferences being introduced * by later inattentive changes. In other words, assigning the * rcu_access_pointer() return value to a local variable results in an * accident waiting to happen. * * It is also permissible to use rcu_access_pointer() when read-side * access to the pointer was removed at least one grace period ago, as is * the case in the context of the RCU callback that is freeing up the data, * or after a synchronize_rcu() returns. This can be useful when tearing * down multi-linked structures after a grace period has elapsed. However, * rcu_dereference_protected() is normally preferred for this use case. 
*/ #define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu) /** * rcu_dereference_check() - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Do an rcu_dereference(), but check that the conditions under which the * dereference will take place are correct. Typically the conditions * indicate the various locking conditions that should be held at that * point. The check should return true if the conditions are satisfied. * An implicit check for being in an RCU read-side critical section * (rcu_read_lock()) is included. * * For example: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); * * could be used to indicate to lockdep that foo->bar may only be dereferenced * if either rcu_read_lock() is held, or that the lock required to replace * the bar struct at foo->bar is held. * * Note that the list of conditions may also include indications of when a lock * need not be held, for example during initialisation or destruction of the * target struct: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || * atomic_read(&foo->usage) == 0); * * Inserts memory barriers on architectures that require them * (currently only the Alpha), prevents the compiler from refetching * (and from merging fetches), and, more importantly, documents exactly * which pointers are protected by RCU and checks that the pointer is * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-bh counterpart to rcu_dereference_check(). However, * please note that starting in v5.0 kernels, vanilla RCU grace periods * wait for local_bh_disable() regions of code in addition to regions of * code demarked by rcu_read_lock() and rcu_read_unlock(). This means * that synchronize_rcu(), call_rcu, and friends all take not only * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_bh_held(), __rcu) /** * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). * However, please note that starting in v5.0 kernels, vanilla RCU grace * periods wait for preempt_disable() regions of code in addition to * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). * This means that synchronize_rcu(), call_rcu, and friends all take not * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_sched_held(), \ __rcu) /* * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). 
*/ #define rcu_dereference_raw_check(p) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit * the READ_ONCE(). This is useful in cases where update-side locks * prevent the value of the pointer from changing. Please note that this * primitive does *not* prevent the compiler from repeating this reference * or combining it with other references, so it should not be used without * protection of appropriate locks. * * This function is only for update-side use. Using this function * when protected only by rcu_read_lock() will result in infrequent * but very ugly failures. */ #define rcu_dereference_protected(p, c) \ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu) /** * rcu_dereference() - fetch RCU-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * This is a simple wrapper around rcu_dereference_check(). */ #define rcu_dereference(p) rcu_dereference_check(p, 0) /** * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) /** * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) /** * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism * @p: The pointer to hand off * * This is simply an identity function, but it documents where a pointer * is handed off from RCU to some other synchronization mechanism, for * example, reference counting or locking. In C11, it would map to * kill_dependency(). It could be used as follows:: * * rcu_read_lock(); * p = rcu_dereference(gp); * long_lived = is_long_lived(p); * if (long_lived) { * if (!atomic_inc_not_zero(p->refcnt)) * long_lived = false; * else * p = rcu_pointer_handoff(p); * } * rcu_read_unlock(); */ #define rcu_pointer_handoff(p) (p) /** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs * are within RCU read-side critical sections, then the * synchronize_rcu() is guaranteed to block until after all the other * CPUs exit their critical sections. Similarly, if call_rcu() is invoked * on one CPU while other CPUs are within RCU read-side critical * sections, invocation of the corresponding RCU callback is deferred * until after the all the other CPUs exit their critical sections. * * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also * wait for regions of code with preemption disabled, including regions of * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which * define synchronize_sched(), only code enclosed within rcu_read_lock() * and rcu_read_unlock() are guaranteed to be waited for. * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections. 
One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU * read-side critical section, (2) CPU 1 invokes call_rcu() to register * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU * callback is invoked. This is legal, because the RCU read-side critical * section that was running concurrently with the call_rcu() (and which * therefore might be referencing something that the corresponding RCU * callback would free up) has completed before the corresponding * RCU callback is invoked. * * RCU read-side critical sections may be nested. Any deferred actions * will be deferred until the outermost RCU read-side critical section * completes. * * You can avoid reading and understanding the next paragraph by * following this rule: don't put anything in an rcu_read_lock() RCU * read-side critical section that would block in a !PREEMPTION kernel. * But if you want the full story, read on! * * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); rcu_lock_acquire(&rcu_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock() used illegally while idle"); } /* * So where is rcu_write_lock()? It does not exist, as there is no * way for writers to lock out RCU readers. This is a feature, not * a bug -- this property is what provides RCU's performance benefits. * Of course, writers must coordinate with each other. The normal * spinlock primitives work well for this, but any other technique may be * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * * In almost all situations, rcu_read_unlock() is immune from deadlock. * In recent kernels that have consolidated synchronize_sched() and * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity * also extends to the scheduler's runqueue and priority-inheritance * spinlocks, courtesy of the quiescent-state deferral that is carried * out when rcu_read_unlock() is invoked with interrupts disabled. * * See rcu_read_lock() for more information. */ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ __release(RCU); __rcu_read_unlock(); } /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * * This is equivalent to rcu_read_lock(), but also disables softirqs. * Note that anything else that disables softirqs can also serve as an RCU * read-side critical section. However, please note that this equivalence * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and * rcu_read_lock_bh() were unrelated. 
* * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() * was invoked from some other task. */ static inline void rcu_read_lock_bh(void) { local_bh_disable(); __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); } /** * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ static inline void rcu_read_unlock_bh(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); local_bh_enable(); } /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * * This is equivalent to rcu_read_lock(), but also disables preemption. * Read-side critical sections can also be introduced by anything else that * disables preemption, including local_irq_disable() and friends. However, * please note that the equivalence to rcu_read_lock() applies only to * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() * were unrelated. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_sched() from process context if the matching * rcu_read_lock_sched() was invoked from an NMI handler. */ static inline void rcu_read_lock_sched(void) { preempt_disable(); __acquire(RCU_SCHED); rcu_lock_acquire(&rcu_sched_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_sched() used illegally while idle"); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); __acquire(RCU_SCHED); } /** * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section * * See rcu_read_lock_sched() for more information. */ static inline void rcu_read_unlock_sched(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_sched() used illegally while idle"); rcu_lock_release(&rcu_sched_lock_map); __release(RCU_SCHED); preempt_enable(); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); preempt_enable_notrace(); } /** * RCU_INIT_POINTER() - initialize an RCU protected pointer * @p: The pointer to be initialized. * @v: The value to initialized the pointer to. * * Initialize an RCU-protected pointer in special cases where readers * do not need ordering constraints on the CPU or the compiler. These * special cases are: * * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* * 2. The caller has taken whatever steps are required to prevent * RCU readers from concurrently accessing this pointer *or* * 3. The referenced data structure has already been exposed to * readers either at compile time or via rcu_assign_pointer() *and* * * a. You have not made *any* reader-visible changes to * this structure since then *or* * b. It is OK for readers accessing this structure from its * new location to see the old state of the structure. (For * example, the changes were to statistical counters or to * other state where exact synchronization is not required.) 
* * Failure to follow these rules governing use of RCU_INIT_POINTER() will * result in impossible-to-diagnose memory corruption. As in the structures * will look OK in crash dumps, but any concurrent RCU readers might * see pre-initialized values of the referenced data structure. So * please be very careful how you use RCU_INIT_POINTER()!!! * * If you are creating an RCU-protected linked structure that is accessed * by a single external-to-structure RCU-protected pointer, then you may * use RCU_INIT_POINTER() to initialize the internal RCU-protected * pointers, but you must use rcu_assign_pointer() to initialize the * external-to-structure pointer *after* you have completely initialized * the reader-accessible portions of the linked structure. * * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no * ordering guarantees for either the CPU or the compiler. */ #define RCU_INIT_POINTER(p, v) \ do { \ rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) /** * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer * @p: The pointer to be initialized. * @v: The value to initialized the pointer to. * * GCC-style initialization for an RCU-protected pointer in a structure field. */ #define RCU_POINTER_INITIALIZER(p, v) \ .p = RCU_INITIALIZER(v) /* * Does the specified offset indicate that the corresponding rcu_head * structure can be handled by kvfree_rcu()? */ #define __is_kvfree_rcu_offset(offset) ((offset) < 4096) /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree for double-argument invocations. * @rhf: the name of the struct rcu_head within the type of @ptr. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore * when they are used in a kernel module, that module must invoke the * high-latency rcu_barrier() function at module-unload time. * * The kfree_rcu() function handles this issue. Rather than encoding a * function address in the embedded rcu_head structure, kfree_rcu() instead * encodes the offset of the rcu_head structure within the base structure. * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * * The object to be freed can be allocated either by kmalloc() or * kmem_cache_alloc(). * * Note that the allowable offset might decrease in the future. * * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ #define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) #define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) /** * kfree_rcu_mightsleep() - kfree an object after a grace period. * @ptr: pointer to kfree for single-argument invocations. * * When it comes to head-less variant, only one argument * is passed and that is just a pointer which has to be * freed after a grace period. Therefore the semantic is * * kfree_rcu_mightsleep(ptr); * * where @ptr is the pointer to be freed by kvfree(). * * Please note, head-less way of freeing is permitted to * use from a context that has to follow might_sleep() * annotation. 
Otherwise, please switch and embed the * rcu_head structure within the type of @ptr. */ #define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_arg_2(ptr, rhf) \ do { \ typeof (ptr) ___p = (ptr); \ \ if (___p) { \ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \ } \ } while (0) #define kvfree_rcu_arg_1(ptr) \ do { \ typeof(ptr) ___p = (ptr); \ \ if (___p) \ kvfree_call_rcu(NULL, (void *) (___p)); \ } while (0) /* * Place this after a lock-acquisition primitive to guarantee that * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies * if the UNLOCK and LOCK are executed by the same CPU or if the * UNLOCK and LOCK operate on the same lock variable. */ #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ #else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ #define smp_mb__after_unlock_lock() do { } while (0) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ /* Has the specified rcu_head structure been handed to call_rcu()? */ /** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * * If you intend to invoke rcu_head_after_call_rcu() to test whether a * given rcu_head structure has already been passed to call_rcu(), then * you must also invoke this rcu_head_init() function on it just after * allocating that structure. Calls to this function must not race with * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. */ static inline void rcu_head_init(struct rcu_head *rhp) { rhp->func = (rcu_callback_t)~0L; } /** * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @func, * and @false otherwise. Emits a warning in any other case, including * the case where @rhp has already been invoked after a grace period. * Calls to this function must not race with callback invocation. One way * to avoid such races is to enclose the call to rcu_head_after_call_rcu() * in an RCU read-side critical section that includes a read-side fetch * of the pointer to the structure containing @rhp. */ static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { rcu_callback_t func = READ_ONCE(rhp->func); if (func == f) return true; WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } /* kernel/ksysfs.c definitions */ extern int rcu_expedited; extern int rcu_normal; DEFINE_LOCK_GUARD_0(rcu, do { rcu_read_lock(); /* * sparse doesn't call the cleanup function, * so just release immediately and don't track * the context. We don't need to anyway, since * the whole point of the guard is to not need * the explicit unlock. */ __release(RCU); } while (0), rcu_read_unlock()) #endif /* __LINUX_RCUPDATE_H */
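The comments above are easiest to see in a minimal reader/updater pair. The sketch below is illustrative only and is not part of rcupdate.h: struct foo, gbl_foo, foo_lock and the read_foo()/update_foo() helpers are made-up names, and the usual <linux/slab.h>, <linux/spinlock.h> and <linux/cleanup.h> includes are assumed. Readers dereference the shared pointer inside rcu_read_lock()/rcu_read_unlock(), while the updater publishes a new version with rcu_assign_pointer() and waits for a grace period with synchronize_rcu() before freeing the old one.

/*
 * Illustrative sketch (not part of this header): hypothetical struct foo,
 * gbl_foo and helpers showing the usual RCU reader/updater pattern.
 */
struct foo {
	int a;
	struct rcu_head rcu;	/* used by the kfree_rcu() sketch below */
};

static struct foo __rcu *gbl_foo;
static DEFINE_SPINLOCK(foo_lock);

/* Reader: runs inside an RCU read-side critical section. */
static int read_foo(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock();
	p = rcu_dereference(gbl_foo);	/* fetch the RCU-protected pointer */
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;
}

/* Equivalent reader using the rcu guard defined above (auto-unlock). */
static int read_foo_guarded(void)
{
	struct foo *p;

	guard(rcu)();			/* rcu_read_lock(), released at scope exit */
	p = rcu_dereference(gbl_foo);
	return p ? p->a : -1;
}

/* Updater: publish a new version, then wait for readers before freeing. */
static void update_foo(int new_a)
{
	struct foo *new_fp, *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return;
	new_fp->a = new_a;

	spin_lock(&foo_lock);			/* writers exclude each other */
	old_fp = rcu_dereference_protected(gbl_foo,
					   lockdep_is_held(&foo_lock));
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish with ordering */
	spin_unlock(&foo_lock);

	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(old_fp);
}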
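The RCU_INIT_POINTER() and kfree_rcu() rules can be shown with the same hypothetical structure. This continues the hedged sketch above: struct foo embeds its rcu_head within the first 4096 bytes, so kfree_rcu() can encode the offset of the rcu_head rather than a callback address, and RCU_INIT_POINTER() is used only to NULL out the pointer, which needs no ordering (case 1 in the list above).

/*
 * Illustrative sketch continued: retire the current struct foo and free it
 * after a grace period without an explicit call_rcu() callback.
 */
static void remove_foo(void)
{
	struct foo *old_fp;

	spin_lock(&foo_lock);
	old_fp = rcu_dereference_protected(gbl_foo,
					   lockdep_is_held(&foo_lock));
	/* NULLing out an RCU-protected pointer needs no ordering. */
	RCU_INIT_POINTER(gbl_foo, NULL);
	spin_unlock(&foo_lock);

	if (old_fp)
		kfree_rcu(old_fp, rcu);	/* kfree(old_fp) after a grace period */
}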
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net> * * Based on the original implementation which is: * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright 2003 Andi Kleen, SuSE Labs. * * Parts of the original code have been moved to arch/x86/vdso/vma.c * * This file implements vsyscall emulation. vsyscalls are a legacy ABI: * Userspace can request certain kernel services by calling fixed * addresses. This concept is problematic: * * - It interferes with ASLR. * - It's awkward to write code that lives in kernel addresses but is * callable by userspace at fixed addresses. * - The whole concept is impossible for 32-bit compat userspace. * - UML cannot easily virtualize a vsyscall. * * As of mid-2014, I believe that there is no new userspace code that * will use a vsyscall if the vDSO is present. I hope that there will * soon be no new userspace code that will ever use a vsyscall. * * The code in this file emulates vsyscalls when notified of a page * fault to a vsyscall address. 
*/ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/sched/signal.h> #include <linux/mm_types.h> #include <linux/syscalls.h> #include <linux/ratelimit.h> #include <asm/vsyscall.h> #include <asm/unistd.h> #include <asm/fixmap.h> #include <asm/traps.h> #include <asm/paravirt.h> #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" static enum { EMULATE, XONLY, NONE } vsyscall_mode __ro_after_init = #ifdef CONFIG_LEGACY_VSYSCALL_NONE NONE; #elif defined(CONFIG_LEGACY_VSYSCALL_XONLY) XONLY; #else #error VSYSCALL config is broken #endif static int __init vsyscall_setup(char *str) { if (str) { if (!strcmp("emulate", str)) vsyscall_mode = EMULATE; else if (!strcmp("xonly", str)) vsyscall_mode = XONLY; else if (!strcmp("none", str)) vsyscall_mode = NONE; else return -EINVAL; return 0; } return -EINVAL; } early_param("vsyscall", vsyscall_setup); static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, const char *message) { if (!show_unhandled_signals) return; printk_ratelimited("%s%s[%d] %s ip:%lx cs:%x sp:%lx ax:%lx si:%lx di:%lx\n", level, current->comm, task_pid_nr(current), message, regs->ip, regs->cs, regs->sp, regs->ax, regs->si, regs->di); } static int addr_to_vsyscall_nr(unsigned long addr) { int nr; if ((addr & ~0xC00UL) != VSYSCALL_ADDR) return -EINVAL; nr = (addr & 0xC00UL) >> 10; if (nr >= 3) return -EINVAL; return nr; } static bool write_ok_or_segv(unsigned long ptr, size_t size) { if (!access_ok((void __user *)ptr, size)) { struct thread_struct *thread = &current->thread; thread->error_code = X86_PF_USER | X86_PF_WRITE; thread->cr2 = ptr; thread->trap_nr = X86_TRAP_PF; force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr); return false; } else { return true; } } bool emulate_vsyscall(unsigned long error_code, struct pt_regs *regs, unsigned long address) { unsigned long caller; int vsyscall_nr, syscall_nr, tmp; long ret; unsigned long orig_dx; /* Write faults or kernel-privilege faults never get fixed up. */ if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER) return false; if (!(error_code & X86_PF_INSTR)) { /* Failed vsyscall read */ if (vsyscall_mode == EMULATE) return false; /* * User code tried and failed to read the vsyscall page. */ warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround"); return false; } /* * No point in checking CS -- the only way to get here is a user mode * trap to a high address, which means that we're in 64-bit user code. */ WARN_ON_ONCE(address != regs->ip); if (vsyscall_mode == NONE) { warn_bad_vsyscall(KERN_INFO, regs, "vsyscall attempted with vsyscall=none"); return false; } vsyscall_nr = addr_to_vsyscall_nr(address); trace_emulate_vsyscall(vsyscall_nr); if (vsyscall_nr < 0) { warn_bad_vsyscall(KERN_WARNING, regs, "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround"); goto sigsegv; } if (get_user(caller, (unsigned long __user *)regs->sp) != 0) { warn_bad_vsyscall(KERN_WARNING, regs, "vsyscall with bad stack (exploit attempt?)"); goto sigsegv; } /* * Check for access_ok violations and find the syscall nr. * * NULL is a valid user pointer (in the access_ok sense) on 32-bit and * 64-bit, so we don't need to special-case it here. For all the * vsyscalls, NULL means "don't write anything" not "write it at * address 0". 
*/ switch (vsyscall_nr) { case 0: if (!write_ok_or_segv(regs->di, sizeof(struct __kernel_old_timeval)) || !write_ok_or_segv(regs->si, sizeof(struct timezone))) { ret = -EFAULT; goto check_fault; } syscall_nr = __NR_gettimeofday; break; case 1: if (!write_ok_or_segv(regs->di, sizeof(__kernel_old_time_t))) { ret = -EFAULT; goto check_fault; } syscall_nr = __NR_time; break; case 2: if (!write_ok_or_segv(regs->di, sizeof(unsigned)) || !write_ok_or_segv(regs->si, sizeof(unsigned))) { ret = -EFAULT; goto check_fault; } syscall_nr = __NR_getcpu; break; } /* * Handle seccomp. regs->ip must be the original value. * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst. * * We could optimize the seccomp disabled case, but performance * here doesn't matter. */ regs->orig_ax = syscall_nr; regs->ax = -ENOSYS; tmp = secure_computing(); if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) { warn_bad_vsyscall(KERN_DEBUG, regs, "seccomp tried to change syscall nr or ip"); force_exit_sig(SIGSYS); return true; } regs->orig_ax = -1; if (tmp) goto do_ret; /* skip requested */ /* * With a real vsyscall, page faults cause SIGSEGV. */ ret = -EFAULT; switch (vsyscall_nr) { case 0: /* this decodes regs->di and regs->si on its own */ ret = __x64_sys_gettimeofday(regs); break; case 1: /* this decodes regs->di on its own */ ret = __x64_sys_time(regs); break; case 2: /* while we could clobber regs->dx, we didn't in the past... */ orig_dx = regs->dx; regs->dx = 0; /* this decodes regs->di, regs->si and regs->dx on its own */ ret = __x64_sys_getcpu(regs); regs->dx = orig_dx; break; } check_fault: if (ret == -EFAULT) { /* Bad news -- userspace fed a bad pointer to a vsyscall. */ warn_bad_vsyscall(KERN_INFO, regs, "vsyscall fault (exploit attempt?)"); goto sigsegv; } regs->ax = ret; do_ret: /* Emulate a ret instruction. */ regs->ip = caller; regs->sp += 8; return true; sigsegv: force_sig(SIGSEGV); return true; } /* * A pseudo VMA to allow ptrace access for the vsyscall page. This only * covers the 64bit vsyscall page now. 32bit has a real VMA now and does * not need special handling anymore: */ static const char *gate_vma_name(struct vm_area_struct *vma) { return "[vsyscall]"; } static const struct vm_operations_struct gate_vma_ops = { .name = gate_vma_name, }; static struct vm_area_struct gate_vma __ro_after_init = { .vm_start = VSYSCALL_ADDR, .vm_end = VSYSCALL_ADDR + PAGE_SIZE, .vm_page_prot = PAGE_READONLY_EXEC, .vm_flags = VM_READ | VM_EXEC, .vm_ops = &gate_vma_ops, }; struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { #ifdef CONFIG_COMPAT if (!mm || !test_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags)) return NULL; #endif if (vsyscall_mode == NONE) return NULL; return &gate_vma; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma = get_gate_vma(mm); if (!vma) return 0; return (addr >= vma->vm_start) && (addr < vma->vm_end); } /* * Use this when you have no reliable mm, typically from interrupt * context. It is less reliable than using a task's mm and may give * false positives. */ int in_gate_area_no_mm(unsigned long addr) { return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; } /* * The VSYSCALL page is the only user-accessible page in the kernel address * range. Normally, the kernel page tables can have _PAGE_USER clear, but * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls * are enabled. 
* * Some day we may create a "minimal" vsyscall mode in which we emulate * vsyscalls but leave the page not present. If so, we skip calling * this. */ void __init set_vsyscall_pgtable_user_bits(pgd_t *root) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset_pgd(root, VSYSCALL_ADDR); set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); p4d = p4d_offset(pgd, VSYSCALL_ADDR); #if CONFIG_PGTABLE_LEVELS >= 5 set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER)); #endif pud = pud_offset(p4d, VSYSCALL_ADDR); set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); pmd = pmd_offset(pud, VSYSCALL_ADDR); set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); } void __init map_vsyscall(void) { extern char __vsyscall_page; unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); /* * For full emulation, the page needs to exist for real. In * execute-only mode, there is no PTE at all backing the vsyscall * page. */ if (vsyscall_mode == EMULATE) { __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR); set_vsyscall_pgtable_user_bits(swapper_pg_dir); } if (vsyscall_mode == XONLY) vm_flags_init(&gate_vma, VM_EXEC); BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); }
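For reference, the legacy ABI being emulated above is the one old userspace binaries used directly: three entry points at fixed, 1024-byte-spaced addresses in the vsyscall page (matching the nr = (addr & 0xC00) >> 10 decoding in addr_to_vsyscall_nr()). The following is a hypothetical userspace illustration, not kernel code; the vgettimeofday_t name is made up, and the call only succeeds on kernels running with vsyscall=emulate or vsyscall=xonly. Modern code should use the vDSO or ordinary syscalls instead.

/* Hypothetical userspace illustration of the legacy x86-64 vsyscall ABI. */
#include <stdio.h>
#include <sys/time.h>

#define VSYSCALL_ADDR		0xffffffffff600000UL
#define VSYSCALL_GTOD_ADDR	(VSYSCALL_ADDR + 0x000)	/* nr 0: gettimeofday */
#define VSYSCALL_TIME_ADDR	(VSYSCALL_ADDR + 0x400)	/* nr 1: time */
#define VSYSCALL_GETCPU_ADDR	(VSYSCALL_ADDR + 0x800)	/* nr 2: getcpu */

typedef int (*vgettimeofday_t)(struct timeval *tv, struct timezone *tz);

int main(void)
{
	/*
	 * Calling the fixed address faults into emulate_vsyscall() on a
	 * modern kernel, which performs the real gettimeofday() on the
	 * caller's behalf and emulates the return.
	 */
	vgettimeofday_t vgettimeofday = (vgettimeofday_t)VSYSCALL_GTOD_ADDR;
	struct timeval tv;

	if (vgettimeofday(&tv, NULL) == 0)
		printf("tv_sec=%ld\n", (long)tv.tv_sec);
	return 0;
}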
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Asus notebook built-in keyboard. * Fixes small logical maximum to match usage maximum. * * Currently supported devices are: * EeeBook X205TA * VivoBook E200HA * * Copyright (c) 2016 Yusuke Fujimaki <usk.fujimaki@gmail.com> * * This module based on hid-ortek by * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> * Copyright (c) 2011 Jiri Kosina * * This module has been updated to add support for Asus i2c touchpad. 
* * Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org> * Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com> * Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com> */ /* */ #include <linux/dmi.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/platform_data/x86/asus-wmi.h> #include <linux/input/mt.h> #include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */ #include <linux/power_supply.h> #include <linux/leds.h> #include "hid-ids.h" MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>"); MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>"); MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>"); MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>"); MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); #define T100_TPAD_INTF 2 #define MEDION_E1239T_TPAD_INTF 1 #define E1239T_TP_TOGGLE_REPORT_ID 0x05 #define T100CHI_MOUSE_REPORT_ID 0x06 #define FEATURE_REPORT_ID 0x0d #define INPUT_REPORT_ID 0x5d #define FEATURE_KBD_REPORT_ID 0x5a #define FEATURE_KBD_REPORT_SIZE 16 #define FEATURE_KBD_LED_REPORT_ID1 0x5d #define FEATURE_KBD_LED_REPORT_ID2 0x5e #define SUPPORT_KBD_BACKLIGHT BIT(0) #define MAX_TOUCH_MAJOR 8 #define MAX_PRESSURE 128 #define BTN_LEFT_MASK 0x01 #define CONTACT_TOOL_TYPE_MASK 0x80 #define CONTACT_X_MSB_MASK 0xf0 #define CONTACT_Y_MSB_MASK 0x0f #define CONTACT_TOUCH_MAJOR_MASK 0x07 #define CONTACT_PRESSURE_MASK 0x7f #define BATTERY_REPORT_ID (0x03) #define BATTERY_REPORT_SIZE (1 + 8) #define BATTERY_LEVEL_MAX ((u8)255) #define BATTERY_STAT_DISCONNECT (0) #define BATTERY_STAT_CHARGING (1) #define BATTERY_STAT_FULL (2) #define QUIRK_FIX_NOTEBOOK_REPORT BIT(0) #define QUIRK_NO_INIT_REPORTS BIT(1) #define QUIRK_SKIP_INPUT_MAPPING BIT(2) #define QUIRK_IS_MULTITOUCH BIT(3) #define QUIRK_NO_CONSUMER_USAGES BIT(4) #define QUIRK_USE_KBD_BACKLIGHT BIT(5) #define QUIRK_T100_KEYBOARD BIT(6) #define QUIRK_T100CHI BIT(7) #define QUIRK_G752_KEYBOARD BIT(8) #define QUIRK_T90CHI BIT(9) #define QUIRK_MEDION_E1239T BIT(10) #define QUIRK_ROG_NKEY_KEYBOARD BIT(11) #define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12) #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ QUIRK_NO_INIT_REPORTS | \ QUIRK_NO_CONSUMER_USAGES) #define I2C_TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \ QUIRK_SKIP_INPUT_MAPPING | \ QUIRK_IS_MULTITOUCH) #define TRKID_SGN ((TRKID_MAX + 1) >> 1) struct asus_kbd_leds { struct led_classdev cdev; struct hid_device *hdev; struct work_struct work; unsigned int brightness; spinlock_t lock; bool removed; }; struct asus_touchpad_info { int max_x; int max_y; int res_x; int res_y; int contact_size; int max_contacts; int report_size; }; struct asus_drvdata { unsigned long quirks; struct hid_device *hdev; struct input_dev *input; struct input_dev *tp_kbd_input; struct asus_kbd_leds *kbd_backlight; const struct asus_touchpad_info *tp; bool enable_backlight; struct power_supply *battery; struct power_supply_desc battery_desc; int battery_capacity; int battery_stat; bool battery_in_query; unsigned long battery_next_query; }; static int asus_report_battery(struct asus_drvdata *, u8 *, int); static const struct asus_touchpad_info asus_i2c_tp = { .max_x = 2794, .max_y = 1758, .contact_size = 5, .max_contacts = 5, .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */, }; static const struct asus_touchpad_info asus_t100ta_tp = { .max_x = 2240, .max_y = 1120, .res_x = 30, /* units/mm */ .res_y = 27, /* units/mm */ .contact_size = 5, .max_contacts = 5, .report_size = 28 /* 2 
byte header + 5 * 5 + 1 byte footer */, }; static const struct asus_touchpad_info asus_t100ha_tp = { .max_x = 2640, .max_y = 1320, .res_x = 30, /* units/mm */ .res_y = 29, /* units/mm */ .contact_size = 5, .max_contacts = 5, .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */, }; static const struct asus_touchpad_info asus_t200ta_tp = { .max_x = 3120, .max_y = 1716, .res_x = 30, /* units/mm */ .res_y = 28, /* units/mm */ .contact_size = 5, .max_contacts = 5, .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */, }; static const struct asus_touchpad_info asus_t100chi_tp = { .max_x = 2640, .max_y = 1320, .res_x = 31, /* units/mm */ .res_y = 29, /* units/mm */ .contact_size = 3, .max_contacts = 4, .report_size = 15 /* 2 byte header + 3 * 4 + 1 byte footer */, }; static const struct asus_touchpad_info medion_e1239t_tp = { .max_x = 2640, .max_y = 1380, .res_x = 29, /* units/mm */ .res_y = 28, /* units/mm */ .contact_size = 5, .max_contacts = 5, .report_size = 32 /* 2 byte header + 5 * 5 + 5 byte footer */, }; static void asus_report_contact_down(struct asus_drvdata *drvdat, int toolType, u8 *data) { struct input_dev *input = drvdat->input; int touch_major, pressure, x, y; x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1]; y = drvdat->tp->max_y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); if (drvdat->tp->contact_size < 5) return; if (toolType == MT_TOOL_PALM) { touch_major = MAX_TOUCH_MAJOR; pressure = MAX_PRESSURE; } else { touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK; pressure = data[4] & CONTACT_PRESSURE_MASK; } input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major); input_report_abs(input, ABS_MT_PRESSURE, pressure); } /* Required for Synaptics Palm Detection */ static void asus_report_tool_width(struct asus_drvdata *drvdat) { struct input_mt *mt = drvdat->input->mt; struct input_mt_slot *oldest; int oldid, i; if (drvdat->tp->contact_size < 5) return; oldest = NULL; oldid = mt->trkid; for (i = 0; i < mt->num_slots; ++i) { struct input_mt_slot *ps = &mt->slots[i]; int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID); if (id < 0) continue; if ((id - oldid) & TRKID_SGN) { oldest = ps; oldid = id; } } if (oldest) { input_report_abs(drvdat->input, ABS_TOOL_WIDTH, input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR)); } } static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size) { int i, toolType = MT_TOOL_FINGER; u8 *contactData = data + 2; if (size != drvdat->tp->report_size) return 0; for (i = 0; i < drvdat->tp->max_contacts; i++) { bool down = !!(data[1] & BIT(i+3)); if (drvdat->tp->contact_size >= 5) toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ? 
MT_TOOL_PALM : MT_TOOL_FINGER; input_mt_slot(drvdat->input, i); input_mt_report_slot_state(drvdat->input, toolType, down); if (down) { asus_report_contact_down(drvdat, toolType, contactData); contactData += drvdat->tp->contact_size; } } input_report_key(drvdat->input, BTN_LEFT, data[1] & BTN_LEFT_MASK); asus_report_tool_width(drvdat); input_mt_sync_frame(drvdat->input); input_sync(drvdat->input); return 1; } static int asus_e1239t_event(struct asus_drvdata *drvdat, u8 *data, int size) { if (size != 3) return 0; /* Handle broken mute key which only sends press events */ if (!drvdat->tp && data[0] == 0x02 && data[1] == 0xe2 && data[2] == 0x00) { input_report_key(drvdat->input, KEY_MUTE, 1); input_sync(drvdat->input); input_report_key(drvdat->input, KEY_MUTE, 0); input_sync(drvdat->input); return 1; } /* Handle custom touchpad toggle key which only sends press events */ if (drvdat->tp_kbd_input && data[0] == 0x05 && data[1] == 0x02 && data[2] == 0x28) { input_report_key(drvdat->tp_kbd_input, KEY_F21, 1); input_sync(drvdat->tp_kbd_input); input_report_key(drvdat->tp_kbd_input, KEY_F21, 0); input_sync(drvdat->tp_kbd_input); return 1; } return 0; } static int asus_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && (usage->hid & HID_USAGE) != 0x00 && (usage->hid & HID_USAGE) != 0xff && !usage->type) { hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", usage->hid & HID_USAGE); } return 0; } static int asus_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); if (drvdata->battery && data[0] == BATTERY_REPORT_ID) return asus_report_battery(drvdata, data, size); if (drvdata->tp && data[0] == INPUT_REPORT_ID) return asus_report_input(drvdata, data, size); if (drvdata->quirks & QUIRK_MEDION_E1239T) return asus_e1239t_event(drvdata, data, size); /* * Skip these report ID, the device emits a continuous stream associated * with the AURA mode it is in which looks like an 'echo'. */ if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2) return -1; if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { /* * G713 and G733 send these codes on some keypresses, depending on * the key pressed it can trigger a shutdown event if not caught. */ if (data[0] == 0x02 && data[1] == 0x30) { return -1; } } if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) { /* * CLAYMORE II keyboard sends this packet when it goes to sleep * this causes the whole system to go into suspend. 
*/ if(size == 2 && data[0] == 0x02 && data[1] == 0x00) { return -1; } } return 0; } static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size) { unsigned char *dmabuf; int ret; dmabuf = kmemdup(buf, buf_size, GFP_KERNEL); if (!dmabuf) return -ENOMEM; /* * The report ID should be set from the incoming buffer due to LED and key * interfaces having different pages */ ret = hid_hw_raw_request(hdev, buf[0], dmabuf, buf_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); kfree(dmabuf); return ret; } static int asus_kbd_init(struct hid_device *hdev, u8 report_id) { const u8 buf[] = { report_id, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; int ret; ret = asus_kbd_set_report(hdev, buf, sizeof(buf)); if (ret < 0) hid_err(hdev, "Asus failed to send init command: %d\n", ret); return ret; } static int asus_kbd_get_functions(struct hid_device *hdev, unsigned char *kbd_func, u8 report_id) { const u8 buf[] = { report_id, 0x05, 0x20, 0x31, 0x00, 0x08 }; u8 *readbuf; int ret; ret = asus_kbd_set_report(hdev, buf, sizeof(buf)); if (ret < 0) { hid_err(hdev, "Asus failed to send configuration command: %d\n", ret); return ret; } readbuf = kzalloc(FEATURE_KBD_REPORT_SIZE, GFP_KERNEL); if (!readbuf) return -ENOMEM; ret = hid_hw_raw_request(hdev, FEATURE_KBD_REPORT_ID, readbuf, FEATURE_KBD_REPORT_SIZE, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { hid_err(hdev, "Asus failed to request functions: %d\n", ret); kfree(readbuf); return ret; } *kbd_func = readbuf[6]; kfree(readbuf); return ret; } static int asus_kbd_disable_oobe(struct hid_device *hdev) { const u8 init[][6] = { { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }, { FEATURE_KBD_REPORT_ID, 0xBA, 0xC5, 0xC4 }, { FEATURE_KBD_REPORT_ID, 0xD0, 0x8F, 0x01 }, { FEATURE_KBD_REPORT_ID, 0xD0, 0x85, 0xFF } }; int ret; for (size_t i = 0; i < ARRAY_SIZE(init); i++) { ret = asus_kbd_set_report(hdev, init[i], sizeof(init[i])); if (ret < 0) return ret; } hid_info(hdev, "Disabled OOBE for keyboard\n"); return 0; } static void asus_schedule_work(struct asus_kbd_leds *led) { unsigned long flags; spin_lock_irqsave(&led->lock, flags); if (!led->removed) schedule_work(&led->work); spin_unlock_irqrestore(&led->lock, flags); } static void asus_kbd_backlight_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); unsigned long flags; spin_lock_irqsave(&led->lock, flags); led->brightness = brightness; spin_unlock_irqrestore(&led->lock, flags); asus_schedule_work(led); } static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev) { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); enum led_brightness brightness; unsigned long flags; spin_lock_irqsave(&led->lock, flags); brightness = led->brightness; spin_unlock_irqrestore(&led->lock, flags); return brightness; } static void asus_kbd_backlight_work(struct work_struct *work) { struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work); u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 }; int ret; unsigned long flags; spin_lock_irqsave(&led->lock, flags); buf[4] = led->brightness; spin_unlock_irqrestore(&led->lock, flags); ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf)); if (ret < 0) hid_err(led->hdev, "Asus failed to set keyboard backlight: %d\n", ret); } /* WMI-based keyboard backlight LED control (via asus-wmi driver) takes * precedence. 
We only activate HID-based backlight control when the * WMI control is not available. */ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); u32 value; int ret; if (!IS_ENABLED(CONFIG_ASUS_WMI)) return false; if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && dmi_check_system(asus_use_hid_led_dmi_ids)) { hid_info(hdev, "using HID for asus::kbd_backlight\n"); return false; } ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value); hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value); if (ret) return false; return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT); } static int asus_kbd_register_leds(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); unsigned char kbd_func; int ret; if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { /* Initialize keyboard */ ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID); if (ret < 0) return ret; /* The LED endpoint is initialised in two HID */ ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID1); if (ret < 0) return ret; ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2); if (ret < 0) return ret; if (dmi_match(DMI_PRODUCT_FAMILY, "ProArt P16")) { ret = asus_kbd_disable_oobe(hdev); if (ret < 0) return ret; } } else { /* Initialize keyboard */ ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID); if (ret < 0) return ret; /* Get keyboard functions */ ret = asus_kbd_get_functions(hdev, &kbd_func, FEATURE_KBD_REPORT_ID); if (ret < 0) return ret; /* Check for backlight support */ if (!(kbd_func & SUPPORT_KBD_BACKLIGHT)) return -ENODEV; } drvdata->kbd_backlight = devm_kzalloc(&hdev->dev, sizeof(struct asus_kbd_leds), GFP_KERNEL); if (!drvdata->kbd_backlight) return -ENOMEM; drvdata->kbd_backlight->removed = false; drvdata->kbd_backlight->brightness = 0; drvdata->kbd_backlight->hdev = hdev; drvdata->kbd_backlight->cdev.name = "asus::kbd_backlight"; drvdata->kbd_backlight->cdev.max_brightness = 3; drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set; drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get; INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work); spin_lock_init(&drvdata->kbd_backlight->lock); ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev); if (ret < 0) { /* No need to have this still around */ devm_kfree(&hdev->dev, drvdata->kbd_backlight); } return ret; } /* * [0] REPORT_ID (same value defined in report descriptor) * [1] rest battery level. 
range [0..255] * [2]..[7] Bluetooth hardware address (MAC address) * [8] charging status * = 0 : AC offline / discharging * = 1 : AC online / charging * = 2 : AC online / fully charged */ static int asus_parse_battery(struct asus_drvdata *drvdata, u8 *data, int size) { u8 sts; u8 lvl; int val; lvl = data[1]; sts = data[8]; drvdata->battery_capacity = ((int)lvl * 100) / (int)BATTERY_LEVEL_MAX; switch (sts) { case BATTERY_STAT_CHARGING: val = POWER_SUPPLY_STATUS_CHARGING; break; case BATTERY_STAT_FULL: val = POWER_SUPPLY_STATUS_FULL; break; case BATTERY_STAT_DISCONNECT: default: val = POWER_SUPPLY_STATUS_DISCHARGING; break; } drvdata->battery_stat = val; return 0; } static int asus_report_battery(struct asus_drvdata *drvdata, u8 *data, int size) { /* notify only the autonomous event by device */ if ((drvdata->battery_in_query == false) && (size == BATTERY_REPORT_SIZE)) power_supply_changed(drvdata->battery); return 0; } static int asus_battery_query(struct asus_drvdata *drvdata) { u8 *buf; int ret = 0; buf = kmalloc(BATTERY_REPORT_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; drvdata->battery_in_query = true; ret = hid_hw_raw_request(drvdata->hdev, BATTERY_REPORT_ID, buf, BATTERY_REPORT_SIZE, HID_INPUT_REPORT, HID_REQ_GET_REPORT); drvdata->battery_in_query = false; if (ret == BATTERY_REPORT_SIZE) ret = asus_parse_battery(drvdata, buf, BATTERY_REPORT_SIZE); else ret = -ENODATA; kfree(buf); return ret; } static enum power_supply_property asus_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_MODEL_NAME, }; #define QUERY_MIN_INTERVAL (60 * HZ) /* 60[sec] */ static int asus_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct asus_drvdata *drvdata = power_supply_get_drvdata(psy); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_STATUS: case POWER_SUPPLY_PROP_CAPACITY: if (time_before(drvdata->battery_next_query, jiffies)) { drvdata->battery_next_query = jiffies + QUERY_MIN_INTERVAL; ret = asus_battery_query(drvdata); if (ret) return ret; } if (psp == POWER_SUPPLY_PROP_STATUS) val->intval = drvdata->battery_stat; else val->intval = drvdata->battery_capacity; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = drvdata->hdev->name; break; default: ret = -EINVAL; break; } return ret; } static int asus_battery_probe(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); struct power_supply_config pscfg = { .drv_data = drvdata }; int ret = 0; drvdata->battery_capacity = 0; drvdata->battery_stat = POWER_SUPPLY_STATUS_UNKNOWN; drvdata->battery_in_query = false; drvdata->battery_desc.properties = asus_battery_props; drvdata->battery_desc.num_properties = ARRAY_SIZE(asus_battery_props); drvdata->battery_desc.get_property = asus_battery_get_property; drvdata->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; drvdata->battery_desc.use_for_apm = 0; drvdata->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "asus-keyboard-%s-battery", strlen(hdev->uniq) ? 
hdev->uniq : dev_name(&hdev->dev)); if (!drvdata->battery_desc.name) return -ENOMEM; drvdata->battery_next_query = jiffies; drvdata->battery = devm_power_supply_register(&hdev->dev, &(drvdata->battery_desc), &pscfg); if (IS_ERR(drvdata->battery)) { ret = PTR_ERR(drvdata->battery); drvdata->battery = NULL; hid_err(hdev, "Unable to register battery device\n"); return ret; } power_supply_powers(drvdata->battery, &hdev->dev); return ret; } static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct input_dev *input = hi->input; struct asus_drvdata *drvdata = hid_get_drvdata(hdev); /* T100CHI uses MULTI_INPUT, bind the touchpad to the mouse hid_input */ if (drvdata->quirks & QUIRK_T100CHI && hi->report->id != T100CHI_MOUSE_REPORT_ID) return 0; /* Handle MULTI_INPUT on E1239T mouse/touchpad USB interface */ if (drvdata->tp && (drvdata->quirks & QUIRK_MEDION_E1239T)) { switch (hi->report->id) { case E1239T_TP_TOGGLE_REPORT_ID: input_set_capability(input, EV_KEY, KEY_F21); input->name = "Asus Touchpad Keys"; drvdata->tp_kbd_input = input; return 0; case INPUT_REPORT_ID: break; /* Touchpad report, handled below */ default: return 0; /* Ignore other reports */ } } if (drvdata->tp) { int ret; input_set_abs_params(input, ABS_MT_POSITION_X, 0, drvdata->tp->max_x, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, drvdata->tp->max_y, 0, 0); input_abs_set_res(input, ABS_MT_POSITION_X, drvdata->tp->res_x); input_abs_set_res(input, ABS_MT_POSITION_Y, drvdata->tp->res_y); if (drvdata->tp->contact_size >= 5) { input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0); input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0); input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0); } __set_bit(BTN_LEFT, input->keybit); __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); ret = input_mt_init_slots(input, drvdata->tp->max_contacts, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Asus input mt init slots failed: %d\n", ret); return ret; } } drvdata->input = input; if (drvdata->enable_backlight && !asus_kbd_wmi_led_control_present(hdev) && asus_kbd_register_leds(hdev)) hid_warn(hdev, "Failed to initialize backlight.\n"); return 0; } #define asus_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, \ max, EV_KEY, (c)) static int asus_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) { /* Don't map anything from the HID report. * We do it all manually in asus_input_configured */ return -1; } /* * Ignore a bunch of bogus collections in the T100CHI descriptor. * This avoids a bunch of non-functional hid_input devices getting * created because of the T100CHI using HID_QUIRK_MULTI_INPUT. 
*/ if ((drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) && (field->application == (HID_UP_GENDESK | 0x0080) || field->application == HID_GD_MOUSE || usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) || usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) || usage->hid == (HID_UP_GENDEVCTRLS | 0x0026))) return -1; /* ASUS-specific keyboard hotkeys and led backlight */ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_ASUSVENDOR) { switch (usage->hid & HID_USAGE) { case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break; case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF); break; case 0x6c: asus_map_key_clear(KEY_SLEEP); break; case 0x7c: asus_map_key_clear(KEY_MICMUTE); break; case 0x82: asus_map_key_clear(KEY_CAMERA); break; case 0x88: asus_map_key_clear(KEY_RFKILL); break; case 0xb5: asus_map_key_clear(KEY_CALC); break; case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break; case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break; case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break; case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */ case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */ case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */ case 0x5c: asus_map_key_clear(KEY_PROG3); break; /* Fn+Space Power4Gear */ case 0x99: asus_map_key_clear(KEY_PROG4); break; /* Fn+F5 "fan" symbol */ case 0xae: asus_map_key_clear(KEY_PROG4); break; /* Fn+F5 "fan" symbol */ case 0x92: asus_map_key_clear(KEY_CALC); break; /* Fn+Ret "Calc" symbol */ case 0xb2: asus_map_key_clear(KEY_PROG2); break; /* Fn+Left previous aura */ case 0xb3: asus_map_key_clear(KEY_PROG3); break; /* Fn+Left next aura */ case 0x6a: asus_map_key_clear(KEY_F13); break; /* Screenpad toggle */ case 0x4b: asus_map_key_clear(KEY_F14); break; /* Arrows/Pg-Up/Dn toggle */ case 0xa5: asus_map_key_clear(KEY_F15); break; /* ROG Ally left back */ case 0xa6: asus_map_key_clear(KEY_F16); break; /* ROG Ally QAM button */ case 0xa7: asus_map_key_clear(KEY_F17); break; /* ROG Ally ROG long-press */ case 0xa8: asus_map_key_clear(KEY_F18); break; /* ROG Ally ROG long-press-release */ default: /* ASUS lazily declares 256 usages, ignore the rest, * as some make the keyboard appear as a pointer device. */ return -1; } /* * Check and enable backlight only on devices with UsagePage == * 0xff31 to avoid initializing the keyboard firmware multiple * times on devices with multiple HID descriptors but same * PID/VID. 
*/ if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) drvdata->enable_backlight = true; set_bit(EV_REP, hi->input->evbit); return 1; } if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) { switch (usage->hid & HID_USAGE) { case 0xff01: asus_map_key_clear(BTN_1); break; case 0xff02: asus_map_key_clear(BTN_2); break; case 0xff03: asus_map_key_clear(BTN_3); break; case 0xff04: asus_map_key_clear(BTN_4); break; case 0xff05: asus_map_key_clear(BTN_5); break; case 0xff06: asus_map_key_clear(BTN_6); break; case 0xff07: asus_map_key_clear(BTN_7); break; case 0xff08: asus_map_key_clear(BTN_8); break; case 0xff09: asus_map_key_clear(BTN_9); break; case 0xff0a: asus_map_key_clear(BTN_A); break; case 0xff0b: asus_map_key_clear(BTN_B); break; case 0x00f1: asus_map_key_clear(KEY_WLAN); break; case 0x00f2: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x00f3: asus_map_key_clear(KEY_BRIGHTNESSUP); break; case 0x00f4: asus_map_key_clear(KEY_DISPLAY_OFF); break; case 0x00f7: asus_map_key_clear(KEY_CAMERA); break; case 0x00f8: asus_map_key_clear(KEY_PROG1); break; default: return 0; } set_bit(EV_REP, hi->input->evbit); return 1; } if (drvdata->quirks & QUIRK_NO_CONSUMER_USAGES && (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) { switch (usage->hid & HID_USAGE) { case 0xe2: /* Mute */ case 0xe9: /* Volume up */ case 0xea: /* Volume down */ return 0; default: /* Ignore dummy Consumer usages which make the * keyboard incorrectly appear as a pointer device. */ return -1; } } /* * The mute button is broken and only sends press events, we * deal with this in our raw_event handler, so do not map it. */ if ((drvdata->quirks & QUIRK_MEDION_E1239T) && usage->hid == (HID_UP_CONSUMER | 0xe2)) { input_set_capability(hi->input, EV_KEY, KEY_MUTE); return -1; } return 0; } static int asus_start_multitouch(struct hid_device *hdev) { int ret; static const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 }; unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL); if (!dmabuf) { ret = -ENOMEM; hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret); return ret; } ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); kfree(dmabuf); if (ret != sizeof(buf)) { hid_err(hdev, "Asus failed to start multitouch: %d\n", ret); return ret; } return 0; } static int __maybe_unused asus_resume(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); int ret = 0; if (drvdata->kbd_backlight) { const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, drvdata->kbd_backlight->cdev.brightness }; ret = asus_kbd_set_report(hdev, buf, sizeof(buf)); if (ret < 0) { hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret); goto asus_resume_err; } } asus_resume_err: return ret; } static int __maybe_unused asus_reset_resume(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); if (drvdata->tp) return asus_start_multitouch(hdev); return 0; } static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct asus_drvdata *drvdata; drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL); if (drvdata == NULL) { hid_err(hdev, "Can't alloc Asus descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, drvdata); drvdata->quirks = id->driver_data; /* * T90CHI's keyboard dock returns same ID values as T100CHI's dock. * Thus, identify T90CHI dock with product name string. 
*/ if (strstr(hdev->name, "T90CHI")) { drvdata->quirks &= ~QUIRK_T100CHI; drvdata->quirks |= QUIRK_T90CHI; } if (drvdata->quirks & QUIRK_IS_MULTITOUCH) drvdata->tp = &asus_i2c_tp; if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) { drvdata->quirks = QUIRK_SKIP_INPUT_MAPPING; /* * The T100HA uses the same USB-ids as the T100TAF and * the T200TA uses the same USB-ids as the T100TA, while * both have different max x/y values as the T100TA[F]. */ if (dmi_match(DMI_PRODUCT_NAME, "T100HAN")) drvdata->tp = &asus_t100ha_tp; else if (dmi_match(DMI_PRODUCT_NAME, "T200TA")) drvdata->tp = &asus_t200ta_tp; else drvdata->tp = &asus_t100ta_tp; } } if (drvdata->quirks & QUIRK_T100CHI) { /* * All functionality is on a single HID interface and for * userspace the touchpad must be a separate input_dev. */ hdev->quirks |= HID_QUIRK_MULTI_INPUT; drvdata->tp = &asus_t100chi_tp; } if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) { struct usb_host_interface *alt = to_usb_interface(hdev->dev.parent)->altsetting; if (alt->desc.bInterfaceNumber == MEDION_E1239T_TPAD_INTF) { /* For separate input-devs for tp and tp toggle key */ hdev->quirks |= HID_QUIRK_MULTI_INPUT; drvdata->quirks |= QUIRK_SKIP_INPUT_MAPPING; drvdata->tp = &medion_e1239t_tp; } } if (drvdata->quirks & QUIRK_NO_INIT_REPORTS) hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; drvdata->hdev = hdev; if (drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) { ret = asus_battery_probe(hdev); if (ret) { hid_err(hdev, "Asus hid battery_probe failed: %d\n", ret); return ret; } } ret = hid_parse(hdev); if (ret) { hid_err(hdev, "Asus hid parse failed: %d\n", ret); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "Asus hw start failed: %d\n", ret); return ret; } if (!drvdata->input) { hid_err(hdev, "Asus input not registered\n"); ret = -ENOMEM; goto err_stop_hw; } if (drvdata->tp) { drvdata->input->name = "Asus TouchPad"; } else { drvdata->input->name = "Asus Keyboard"; } if (drvdata->tp) { ret = asus_start_multitouch(hdev); if (ret) goto err_stop_hw; } return 0; err_stop_hw: hid_hw_stop(hdev); return ret; } static void asus_remove(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); unsigned long flags; if (drvdata->kbd_backlight) { spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags); drvdata->kbd_backlight->removed = true; spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags); cancel_work_sync(&drvdata->kbd_backlight->work); } hid_hw_stop(hdev); } static const __u8 asus_g752_fixed_rdesc[] = { 0x19, 0x00, /* Usage Minimum (0x00) */ 0x2A, 0xFF, 0x00, /* Usage Maximum (0xFF) */ }; static const __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT && *rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) { hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); rdesc[55] = 0xdd; } /* For the T100TA/T200TA keyboard dock */ if (drvdata->quirks & QUIRK_T100_KEYBOARD && (*rsize == 76 || *rsize == 101) && rdesc[73] == 0x81 && rdesc[74] == 0x01) { hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n"); rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT; } /* For the T100CHI/T90CHI keyboard dock */ if (drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) { int rsize_orig; int offs; if (drvdata->quirks & QUIRK_T100CHI) { 
rsize_orig = 403; offs = 388; } else { rsize_orig = 306; offs = 291; } /* * Change Usage (76h) to Usage Minimum (00h), Usage Maximum * (FFh) and clear the flags in the Input() byte. * Note the descriptor has a bogus 0 byte at the end so we * only need 1 extra byte. */ if (*rsize == rsize_orig && rdesc[offs] == 0x09 && rdesc[offs + 1] == 0x76) { *rsize = rsize_orig + 1; rdesc = kmemdup(rdesc, *rsize, GFP_KERNEL); if (!rdesc) return NULL; hid_info(hdev, "Fixing up %s keyb report descriptor\n", drvdata->quirks & QUIRK_T100CHI ? "T100CHI" : "T90CHI"); memmove(rdesc + offs + 4, rdesc + offs + 2, 12); rdesc[offs] = 0x19; rdesc[offs + 1] = 0x00; rdesc[offs + 2] = 0x29; rdesc[offs + 3] = 0xff; rdesc[offs + 14] = 0x00; } } if (drvdata->quirks & QUIRK_G752_KEYBOARD && *rsize == 75 && rdesc[61] == 0x15 && rdesc[62] == 0x00) { /* report is missing usage minimum and maximum */ __u8 *new_rdesc; size_t new_size = *rsize + sizeof(asus_g752_fixed_rdesc); new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL); if (new_rdesc == NULL) return rdesc; hid_info(hdev, "Fixing up Asus G752 keyb report descriptor\n"); /* copy the valid part */ memcpy(new_rdesc, rdesc, 61); /* insert missing part */ memcpy(new_rdesc + 61, asus_g752_fixed_rdesc, sizeof(asus_g752_fixed_rdesc)); /* copy remaining data */ memcpy(new_rdesc + 61 + sizeof(asus_g752_fixed_rdesc), rdesc + 61, *rsize - 61); *rsize = new_size; rdesc = new_rdesc; } if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize == 331 && rdesc[190] == 0x85 && rdesc[191] == 0x5a && rdesc[204] == 0x95 && rdesc[205] == 0x05) { hid_info(hdev, "Fixing up Asus N-KEY keyb report descriptor\n"); rdesc[205] = 0x01; } /* match many more n-key devices */ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) { for (int i = 0; i < *rsize - 15; i++) { /* offset to the count from 0x5a report part always 14 */ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a && rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) { hid_info(hdev, "Fixing up Asus N-Key report descriptor\n"); rdesc[i + 15] = 0x01; break; } } } return rdesc; } static const struct hid_device_id asus_devices[] = { { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD), I2C_KEYBOARD_QUIRKS}, { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD), I2C_TOUCHPAD_QUIRKS }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1), QUIRK_USE_KBD_BACKLIGHT }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD), QUIRK_USE_KBD_BACKLIGHT }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { 
HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD), QUIRK_ROG_CLAYMORE_II_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI }, { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T), QUIRK_MEDION_E1239T }, /* * Note bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard * part, while letting hid-multitouch.c handle the touchpad. */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) }, { } }; MODULE_DEVICE_TABLE(hid, asus_devices); static struct hid_driver asus_driver = { .name = "asus", .id_table = asus_devices, .report_fixup = asus_report_fixup, .probe = asus_probe, .remove = asus_remove, .input_mapping = asus_input_mapping, .input_configured = asus_input_configured, #ifdef CONFIG_PM .reset_resume = asus_reset_resume, .resume = asus_resume, #endif .event = asus_event, .raw_event = asus_raw_event }; module_hid_driver(asus_driver); MODULE_LICENSE("GPL");
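/*
 * Usage sketch (not part of hid-asus.c; hypothetical device IDs and byte
 * offsets): asus_report_fixup() above patches raw report-descriptor bytes
 * before hid-core parses them. A minimal driver applying the same pattern,
 * clearing the Constant bit of one Input item so the field reaches
 * userspace, could look like this.
 */
#include <linux/hid.h>
#include <linux/module.h>

static const __u8 *demo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				     unsigned int *rsize)
{
	/* 0x81 0x01 is "Input (Constant)"; rewrite it to "Input (Data)" */
	if (*rsize >= 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
		hid_info(hdev, "fixing up Input item at offset 73\n");
		rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
	}
	return rdesc;
}

static const struct hid_device_id demo_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* hypothetical IDs */
	{ }
};
MODULE_DEVICE_TABLE(hid, demo_devices);

static struct hid_driver demo_fixup_driver = {
	.name		= "demo-fixup",
	.id_table	= demo_devices,
	.report_fixup	= demo_report_fixup,
};
module_hid_driver(demo_fixup_driver);

MODULE_LICENSE("GPL");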
2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 #include <linux/rtnetlink.h> #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/fib_notifier.h> static unsigned int fib_notifier_net_id; struct fib_notifier_net { struct list_head fib_notifier_ops; struct atomic_notifier_head fib_chain; }; int call_fib_notifier(struct notifier_block *nb, enum fib_event_type event_type, struct fib_notifier_info *info) { int err; err = nb->notifier_call(nb, event_type, info); return notifier_to_errno(err); } EXPORT_SYMBOL(call_fib_notifier); int call_fib_notifiers(struct net *net, enum fib_event_type event_type, struct fib_notifier_info *info) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); int err; err = atomic_notifier_call_chain(&fn_net->fib_chain, event_type, info); return notifier_to_errno(err); } EXPORT_SYMBOL(call_fib_notifiers); static unsigned int fib_seq_sum(struct net *net) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); struct fib_notifier_ops *ops; unsigned int fib_seq = 0; rcu_read_lock(); list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) { if (!try_module_get(ops->owner)) continue; fib_seq += ops->fib_seq_read(net); module_put(ops->owner); } rcu_read_unlock(); return fib_seq; } static int fib_net_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); struct fib_notifier_ops *ops; int err = 0; rcu_read_lock(); list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) { if (!try_module_get(ops->owner)) continue; err = ops->fib_dump(net, nb, extack); module_put(ops->owner); if (err) goto unlock; } unlock: rcu_read_unlock(); return err; } static bool fib_dump_is_consistent(struct net *net, struct notifier_block *nb, void (*cb)(struct notifier_block *nb), unsigned int fib_seq) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); atomic_notifier_chain_register(&fn_net->fib_chain, nb); if (fib_seq == fib_seq_sum(net)) return true; atomic_notifier_chain_unregister(&fn_net->fib_chain, nb); if (cb) cb(nb); return false; } #define FIB_DUMP_MAX_RETRIES 5 int register_fib_notifier(struct net *net, struct notifier_block *nb, void (*cb)(struct notifier_block *nb), struct netlink_ext_ack *extack) { int retries = 0; int err; do { unsigned int fib_seq = fib_seq_sum(net); err = fib_net_dump(net, nb, extack); if (err) return err; if (fib_dump_is_consistent(net, nb, cb, fib_seq)) return 0; } while (++retries < FIB_DUMP_MAX_RETRIES); return -EBUSY; } EXPORT_SYMBOL(register_fib_notifier); int unregister_fib_notifier(struct net *net, struct notifier_block *nb) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); return 
atomic_notifier_chain_unregister(&fn_net->fib_chain, nb); } EXPORT_SYMBOL(unregister_fib_notifier); static int __fib_notifier_ops_register(struct fib_notifier_ops *ops, struct net *net) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); struct fib_notifier_ops *o; list_for_each_entry(o, &fn_net->fib_notifier_ops, list) if (ops->family == o->family) return -EEXIST; list_add_tail_rcu(&ops->list, &fn_net->fib_notifier_ops); return 0; } struct fib_notifier_ops * fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net) { struct fib_notifier_ops *ops; int err; ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); if (!ops) return ERR_PTR(-ENOMEM); err = __fib_notifier_ops_register(ops, net); if (err) goto err_register; return ops; err_register: kfree(ops); return ERR_PTR(err); } EXPORT_SYMBOL(fib_notifier_ops_register); void fib_notifier_ops_unregister(struct fib_notifier_ops *ops) { list_del_rcu(&ops->list); kfree_rcu(ops, rcu); } EXPORT_SYMBOL(fib_notifier_ops_unregister); static int __net_init fib_notifier_net_init(struct net *net) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); INIT_LIST_HEAD(&fn_net->fib_notifier_ops); ATOMIC_INIT_NOTIFIER_HEAD(&fn_net->fib_chain); return 0; } static void __net_exit fib_notifier_net_exit(struct net *net) { struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id); WARN_ON_ONCE(!list_empty(&fn_net->fib_notifier_ops)); } static struct pernet_operations fib_notifier_net_ops = { .init = fib_notifier_net_init, .exit = fib_notifier_net_exit, .id = &fib_notifier_net_id, .size = sizeof(struct fib_notifier_net), }; static int __init fib_notifier_init(void) { return register_pernet_subsys(&fib_notifier_net_ops); } subsys_initcall(fib_notifier_init);
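/*
 * Consumer sketch (hypothetical names, not part of fib_notifier.c):
 * register_fib_notifier() above replays the current FIB state through the
 * notifier block and retries until the per-family sequence counters are
 * consistent; the optional callback runs only when an inconsistent dump
 * forced the registration to be rolled back.
 */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/socket.h>
#include <net/fib_notifier.h>
#include <net/net_namespace.h>

static int demo_fib_event(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	struct fib_notifier_info *info = ptr;

	if (info->family != AF_INET)	/* only IPv4 routes in this sketch */
		return NOTIFY_DONE;
	/* dispatch on the FIB_EVENT_* value carried in @event */
	return NOTIFY_DONE;
}

static void demo_fib_dump_inconsistent(struct notifier_block *nb)
{
	/* drop whatever state was built from the aborted dump */
}

static struct notifier_block demo_fib_nb = {
	.notifier_call = demo_fib_event,
};

static int __init demo_fib_init(void)
{
	return register_fib_notifier(&init_net, &demo_fib_nb,
				     demo_fib_dump_inconsistent, NULL);
}

static void __exit demo_fib_exit(void)
{
	unregister_fib_notifier(&init_net, &demo_fib_nb);
}

module_init(demo_fib_init);
module_exit(demo_fib_exit);
MODULE_LICENSE("GPL");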
3200 3186 1559 3175 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 // SPDX-License-Identifier: GPL-2.0 /* * kobject.h - generic kernel object infrastructure. * * Copyright (c) 2002-2003 Patrick Mochel * Copyright (c) 2002-2003 Open Source Development Labs * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2008 Novell Inc. * * Please read Documentation/core-api/kobject.rst before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. */ #ifndef _KOBJECT_H_ #define _KOBJECT_H_ #include <linux/types.h> #include <linux/list.h> #include <linux/sysfs.h> #include <linux/compiler.h> #include <linux/container_of.h> #include <linux/spinlock.h> #include <linux/kref.h> #include <linux/kobject_ns.h> #include <linux/wait.h> #include <linux/atomic.h> #include <linux/workqueue.h> #include <linux/uidgid.h> #define UEVENT_HELPER_PATH_LEN 256 #define UEVENT_NUM_ENVP 64 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER /* path to the userspace helper executed on an event */ extern char uevent_helper[]; #endif /* counter to tag the uevent, read only except for the kobject core */ extern atomic64_t uevent_seqnum; /* * The actions here must match the index to the string array * in lib/kobject_uevent.c * * Do not add new actions here without checking with the driver-core * maintainers. Action strings are not meant to express subsystem * or device specific properties. In most cases you want to send a * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event * specific variables added to the event environment. 
*/ enum kobject_action { KOBJ_ADD, KOBJ_REMOVE, KOBJ_CHANGE, KOBJ_MOVE, KOBJ_ONLINE, KOBJ_OFFLINE, KOBJ_BIND, KOBJ_UNBIND, }; struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; const struct kobj_type *ktype; struct kernfs_node *sd; /* sysfs directory entry */ struct kref kref; unsigned int state_initialized:1; unsigned int state_in_sysfs:1; unsigned int state_add_uevent_sent:1; unsigned int state_remove_uevent_sent:1; unsigned int uevent_suppress:1; #ifdef CONFIG_DEBUG_KOBJECT_RELEASE struct delayed_work release; #endif }; __printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...); __printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs); static inline const char *kobject_name(const struct kobject *kobj) { return kobj->name; } void kobject_init(struct kobject *kobj, const struct kobj_type *ktype); __printf(3, 4) __must_check int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...); __printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...); void kobject_del(struct kobject *kobj); struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent); int __must_check kobject_rename(struct kobject *, const char *new_name); int __must_check kobject_move(struct kobject *, struct kobject *); struct kobject *kobject_get(struct kobject *kobj); struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj); void kobject_put(struct kobject *kobj); const void *kobject_namespace(const struct kobject *kobj); void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid); char *kobject_get_path(const struct kobject *kobj, gfp_t flag); struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; const struct attribute_group **default_groups; const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj); const void *(*namespace)(const struct kobject *kobj); void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid); }; struct kobj_uevent_env { char *argv[3]; char *envp[UEVENT_NUM_ENVP]; int envp_idx; char buf[UEVENT_BUFFER_SIZE]; int buflen; }; struct kset_uevent_ops { int (* const filter)(const struct kobject *kobj); const char *(* const name)(const struct kobject *kobj); int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env); }; struct kobj_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); }; extern const struct sysfs_ops kobj_sysfs_ops; struct sock; /** * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. * * A kset defines a group of kobjects. They can be individually * different "types" but overall these kobjects all want to be grouped * together and operated on in the same manner. ksets are used to * define the attribute callbacks and other common events that happen to * a kobject. * * @list: the list of all kobjects for this kset * @list_lock: a lock for iterating over the kobjects * @kobj: the embedded kobject for this kset (recursion, isn't it fun...) * @uevent_ops: the set of uevent operations for this kset. 
These are * called whenever a kobject has something happen to it so that the kset * can add new environment variables, or filter out the uevents if so * desired. */ struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } __randomize_layout; void kset_init(struct kset *kset); int __must_check kset_register(struct kset *kset); void kset_unregister(struct kset *kset); struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u, struct kobject *parent_kobj); static inline struct kset *to_kset(struct kobject *kobj) { return kobj ? container_of(kobj, struct kset, kobj) : NULL; } static inline struct kset *kset_get(struct kset *k) { return k ? to_kset(kobject_get(&k->kobj)) : NULL; } static inline void kset_put(struct kset *k) { kobject_put(&k->kobj); } static inline const struct kobj_type *get_ktype(const struct kobject *kobj) { return kobj->ktype; } struct kobject *kset_find_obj(struct kset *, const char *); /* The global /sys/kernel/ kobject for people to chain off of */ extern struct kobject *kernel_kobj; /* The global /sys/kernel/mm/ kobject for people to chain off of */ extern struct kobject *mm_kobj; /* The global /sys/hypervisor/ kobject for people to chain off of */ extern struct kobject *hypervisor_kobj; /* The global /sys/power/ kobject for people to chain off of */ extern struct kobject *power_kobj; /* The global /sys/firmware/ kobject for people to chain off of */ extern struct kobject *firmware_kobj; int kobject_uevent(struct kobject *kobj, enum kobject_action action); int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp[]); int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count); __printf(2, 3) int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...); #endif /* _KOBJECT_H_ */
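/*
 * Usage sketch (hypothetical names, not part of kobject.h): the common
 * pattern for the API above is a dynamically created kobject under
 * /sys/kernel/ exposing one attribute through struct kobj_attribute.
 */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static int demo_value;
static struct kobject *demo_kobj;

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sysfs_emit(buf, "%d\n", demo_value);
}

static ssize_t value_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 10, &demo_value);

	return ret ? ret : count;
}

static struct kobj_attribute value_attr = __ATTR_RW(value);

static int __init demo_kobj_init(void)
{
	int ret;

	/* creates /sys/kernel/kobject_demo; the core provides the release */
	demo_kobj = kobject_create_and_add("kobject_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;
	ret = sysfs_create_file(demo_kobj, &value_attr.attr);
	if (ret)
		kobject_put(demo_kobj);
	return ret;
}

static void __exit demo_kobj_exit(void)
{
	kobject_put(demo_kobj);
}

module_init(demo_kobj_init);
module_exit(demo_kobj_exit);
MODULE_LICENSE("GPL");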
223 2 11 11 102 102 100 14 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VMSTAT_H #define _LINUX_VMSTAT_H #include <linux/types.h> #include <linux/percpu.h> #include <linux/mmzone.h> #include <linux/vm_event_item.h> #include <linux/atomic.h> #include <linux/static_key.h> #include <linux/mmdebug.h> extern int sysctl_stat_interval; #ifdef CONFIG_NUMA #define ENABLE_NUMA_STAT 1 #define DISABLE_NUMA_STAT 0 extern int sysctl_vm_numa_stat; DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); #endif struct reclaim_stat { unsigned nr_dirty; unsigned nr_unqueued_dirty; unsigned nr_congested; unsigned nr_writeback; unsigned nr_immediate; unsigned nr_pageout; unsigned nr_activate[ANON_AND_FILE]; unsigned nr_ref_keep; unsigned nr_unmap_fail; unsigned nr_lazyfree_fail; unsigned nr_demoted; }; /* Stat data for system wide items */ enum vm_stat_item { NR_DIRTY_THRESHOLD, NR_DIRTY_BG_THRESHOLD, NR_MEMMAP_PAGES, /* page metadata allocated through buddy allocator */ NR_MEMMAP_BOOT_PAGES, /* page metadata allocated through boot allocator */ 
NR_VM_STAT_ITEMS, }; #ifdef CONFIG_VM_EVENT_COUNTERS /* * Light weight per cpu counter implementation. * * Counters should only be incremented and no critical kernel component * should rely on the counter values. * * Counters are handled completely inline. On many platforms the code * generated will simply be the increment of a global address. */ struct vm_event_state { unsigned long event[NR_VM_EVENT_ITEMS]; }; DECLARE_PER_CPU(struct vm_event_state, vm_event_states); /* * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the * local_irq_disable overhead. */ static inline void __count_vm_event(enum vm_event_item item) { raw_cpu_inc(vm_event_states.event[item]); } static inline void count_vm_event(enum vm_event_item item) { this_cpu_inc(vm_event_states.event[item]); } static inline void __count_vm_events(enum vm_event_item item, long delta) { raw_cpu_add(vm_event_states.event[item], delta); } static inline void count_vm_events(enum vm_event_item item, long delta) { this_cpu_add(vm_event_states.event[item], delta); } extern void all_vm_events(unsigned long *); extern void vm_events_fold_cpu(int cpu); #else /* Disable counters */ static inline void count_vm_event(enum vm_event_item item) { } static inline void count_vm_events(enum vm_event_item item, long delta) { } static inline void __count_vm_event(enum vm_event_item item) { } static inline void __count_vm_events(enum vm_event_item item, long delta) { } static inline void all_vm_events(unsigned long *ret) { } static inline void vm_events_fold_cpu(int cpu) { } #endif /* CONFIG_VM_EVENT_COUNTERS */ #ifdef CONFIG_NUMA_BALANCING #define count_vm_numa_event(x) count_vm_event(x) #define count_vm_numa_events(x, y) count_vm_events(x, y) #else #define count_vm_numa_event(x) do {} while (0) #define count_vm_numa_events(x, y) do { (void)(y); } while (0) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_DEBUG_TLBFLUSH #define count_vm_tlb_event(x) count_vm_event(x) #define count_vm_tlb_events(x, y) count_vm_events(x, y) #else #define count_vm_tlb_event(x) do {} while (0) #define count_vm_tlb_events(x, y) do { (void)(y); } while (0) #endif #ifdef CONFIG_PER_VMA_LOCK_STATS #define count_vm_vma_lock_event(x) count_vm_event(x) #else #define count_vm_vma_lock_event(x) do {} while (0) #endif #define __count_zid_vm_events(item, zid, delta) \ __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) /* * Zone and node-based page accounting with per cpu differentials. 
*/ extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; #ifdef CONFIG_NUMA static inline void zone_numa_event_add(long x, struct zone *zone, enum numa_stat_item item) { atomic_long_add(x, &zone->vm_numa_event[item]); atomic_long_add(x, &vm_numa_event[item]); } static inline unsigned long zone_numa_event_state(struct zone *zone, enum numa_stat_item item) { return atomic_long_read(&zone->vm_numa_event[item]); } static inline unsigned long global_numa_event_state(enum numa_stat_item item) { return atomic_long_read(&vm_numa_event[item]); } #endif /* CONFIG_NUMA */ static inline void zone_page_state_add(long x, struct zone *zone, enum zone_stat_item item) { atomic_long_add(x, &zone->vm_stat[item]); atomic_long_add(x, &vm_zone_stat[item]); } static inline void node_page_state_add(long x, struct pglist_data *pgdat, enum node_stat_item item) { atomic_long_add(x, &pgdat->vm_stat[item]); atomic_long_add(x, &vm_node_stat[item]); } static inline unsigned long global_zone_page_state(enum zone_stat_item item) { long x = atomic_long_read(&vm_zone_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; #endif return x; } static inline unsigned long global_node_page_state_pages(enum node_stat_item item) { long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; #endif return x; } static inline unsigned long global_node_page_state(enum node_stat_item item) { VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); return global_node_page_state_pages(item); } static inline unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { long x = atomic_long_read(&zone->vm_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; #endif return x; } /* * More accurate version that also considers the currently pending * deltas. For that we need to loop over all cpus to find the current * deltas. There is no synchronization so the result cannot be * exactly accurate either. */ static inline unsigned long zone_page_state_snapshot(struct zone *zone, enum zone_stat_item item) { long x = atomic_long_read(&zone->vm_stat[item]); #ifdef CONFIG_SMP int cpu; for_each_online_cpu(cpu) x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item]; if (x < 0) x = 0; #endif return x; } #ifdef CONFIG_NUMA /* See __count_vm_event comment on why raw_cpu_inc is used. 
*/ static inline void __count_numa_event(struct zone *zone, enum numa_stat_item item) { struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; raw_cpu_inc(pzstats->vm_numa_event[item]); } static inline void __count_numa_events(struct zone *zone, enum numa_stat_item item, long delta) { struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; raw_cpu_add(pzstats->vm_numa_event[item], delta); } extern unsigned long sum_zone_node_page_state(int node, enum zone_stat_item item); extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); extern unsigned long node_page_state_pages(struct pglist_data *pgdat, enum node_stat_item item); extern void fold_vm_numa_events(void); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) #define node_page_state_pages(node, item) global_node_page_state_pages(item) static inline void fold_vm_numa_events(void) { } #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); void __dec_zone_page_state(struct page *, enum zone_stat_item); void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long); void __inc_node_page_state(struct page *, enum node_stat_item); void __dec_node_page_state(struct page *, enum node_stat_item); void mod_zone_page_state(struct zone *, enum zone_stat_item, long); void inc_zone_page_state(struct page *, enum zone_stat_item); void dec_zone_page_state(struct page *, enum zone_stat_item); void mod_node_page_state(struct pglist_data *, enum node_stat_item, long); void inc_node_page_state(struct page *, enum node_stat_item); void dec_node_page_state(struct page *, enum node_stat_item); extern void inc_node_state(struct pglist_data *, enum node_stat_item); extern void __inc_zone_state(struct zone *, enum zone_stat_item); extern void __inc_node_state(struct pglist_data *, enum node_stat_item); extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_node_state(struct pglist_data *, enum node_stat_item); void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); struct ctl_table; int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp, loff_t *ppos); void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *); int calculate_pressure_threshold(struct zone *zone); int calculate_normal_threshold(struct zone *zone); void set_pgdat_percpu_threshold(pg_data_t *pgdat, int (*calculate_pressure)(struct zone *)); #else /* CONFIG_SMP */ /* * We do not maintain differentials in a single processor configuration. * The functions directly modify the zone and global counters. */ static inline void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long delta) { zone_page_state_add(delta, zone, item); } static inline void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, int delta) { if (vmstat_item_in_bytes(item)) { /* * Only cgroups use subpage accounting right now; at * the global level, these items still change in * multiples of whole pages. Store them as pages * internally to keep the per-cpu counters compact. 
*/ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); delta >>= PAGE_SHIFT; } node_page_state_add(delta, pgdat, item); } static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_inc(&zone->vm_stat[item]); atomic_long_inc(&vm_zone_stat[item]); } static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) { atomic_long_inc(&pgdat->vm_stat[item]); atomic_long_inc(&vm_node_stat[item]); } static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_dec(&zone->vm_stat[item]); atomic_long_dec(&vm_zone_stat[item]); } static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) { atomic_long_dec(&pgdat->vm_stat[item]); atomic_long_dec(&vm_node_stat[item]); } static inline void __inc_zone_page_state(struct page *page, enum zone_stat_item item) { __inc_zone_state(page_zone(page), item); } static inline void __inc_node_page_state(struct page *page, enum node_stat_item item) { __inc_node_state(page_pgdat(page), item); } static inline void __dec_zone_page_state(struct page *page, enum zone_stat_item item) { __dec_zone_state(page_zone(page), item); } static inline void __dec_node_page_state(struct page *page, enum node_stat_item item) { __dec_node_state(page_pgdat(page), item); } /* * We only use atomic operations to update counters. So there is no need to * disable interrupts. */ #define inc_zone_page_state __inc_zone_page_state #define dec_zone_page_state __dec_zone_page_state #define mod_zone_page_state __mod_zone_page_state #define inc_node_page_state __inc_node_page_state #define dec_node_page_state __dec_node_page_state #define mod_node_page_state __mod_node_page_state #define inc_zone_state __inc_zone_state #define inc_node_state __inc_node_state #define dec_zone_state __dec_zone_state #define set_pgdat_percpu_threshold(pgdat, callback) { } static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } static inline void quiet_vmstat(void) { } static inline void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats) { } #endif /* CONFIG_SMP */ static inline void __zone_stat_mod_folio(struct folio *folio, enum zone_stat_item item, long nr) { __mod_zone_page_state(folio_zone(folio), item, nr); } static inline void __zone_stat_add_folio(struct folio *folio, enum zone_stat_item item) { __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio)); } static inline void __zone_stat_sub_folio(struct folio *folio, enum zone_stat_item item) { __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio)); } static inline void zone_stat_mod_folio(struct folio *folio, enum zone_stat_item item, long nr) { mod_zone_page_state(folio_zone(folio), item, nr); } static inline void zone_stat_add_folio(struct folio *folio, enum zone_stat_item item) { mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio)); } static inline void zone_stat_sub_folio(struct folio *folio, enum zone_stat_item item) { mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio)); } static inline void __node_stat_mod_folio(struct folio *folio, enum node_stat_item item, long nr) { __mod_node_page_state(folio_pgdat(folio), item, nr); } static inline void __node_stat_add_folio(struct folio *folio, enum node_stat_item item) { __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio)); } static inline void __node_stat_sub_folio(struct folio *folio, enum node_stat_item item) { __mod_node_page_state(folio_pgdat(folio), item, 
-folio_nr_pages(folio)); } static inline void node_stat_mod_folio(struct folio *folio, enum node_stat_item item, long nr) { mod_node_page_state(folio_pgdat(folio), item, nr); } static inline void node_stat_add_folio(struct folio *folio, enum node_stat_item item) { mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio)); } static inline void node_stat_sub_folio(struct folio *folio, enum node_stat_item item) { mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio)); } extern const char * const vmstat_text[]; static inline const char *zone_stat_name(enum zone_stat_item item) { return vmstat_text[item]; } #ifdef CONFIG_NUMA static inline const char *numa_stat_name(enum numa_stat_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + item]; } #endif /* CONFIG_NUMA */ static inline const char *node_stat_name(enum node_stat_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + item]; } static inline const char *lru_list_name(enum lru_list lru) { return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" } #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) static inline const char *vm_event_name(enum vm_event_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS + NR_VM_NODE_STAT_ITEMS + NR_VM_STAT_ITEMS + item]; } #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ #ifdef CONFIG_MEMCG void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { unsigned long flags; local_irq_save(flags); __mod_lruvec_state(lruvec, idx, val); local_irq_restore(flags); } void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val); static inline void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { unsigned long flags; local_irq_save(flags); __lruvec_stat_mod_folio(folio, idx, val); local_irq_restore(flags); } static inline void mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { lruvec_stat_mod_folio(page_folio(page), idx, val); } #else static inline void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); } static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { mod_node_page_state(lruvec_pgdat(lruvec), idx, val); } static inline void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { __mod_node_page_state(folio_pgdat(folio), idx, val); } static inline void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { mod_node_page_state(folio_pgdat(folio), idx, val); } static inline void mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { mod_node_page_state(page_pgdat(page), idx, val); } #endif /* CONFIG_MEMCG */ static inline void __lruvec_stat_add_folio(struct folio *folio, enum node_stat_item idx) { __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); } static inline void __lruvec_stat_sub_folio(struct folio *folio, enum node_stat_item idx) { __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); } static inline void lruvec_stat_add_folio(struct folio *folio, enum node_stat_item idx) { lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); } static inline void lruvec_stat_sub_folio(struct folio *folio, enum node_stat_item idx) { lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); } void 
memmap_boot_pages_add(long delta); void memmap_pages_add(long delta); #endif /* _LINUX_VMSTAT_H */
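/*
 * Usage sketch (hypothetical function; PGFAULT, PGREFILL and NR_FILE_PAGES
 * are existing enum values): the event counters above are racy per-cpu
 * increments, and the global node readouts are approximate, clamped at
 * zero under SMP.
 */
#include <linux/printk.h>
#include <linux/vmstat.h>

static void demo_vmstat_usage(unsigned long nr_refilled)
{
	/* per-cpu event bump; this_cpu_inc, no lock and no irq disabling */
	count_vm_event(PGFAULT);

	/* bulk variant built on raw_cpu_add; races are tolerated by design */
	__count_vm_events(PGREFILL, nr_refilled);

	/* approximate global value; pending per-cpu deltas are not folded in */
	pr_debug("file pages: %lu\n", global_node_page_state(NR_FILE_PAGES));
}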
// SPDX-License-Identifier: GPL-2.0-only /* * Frontend driver for the GENPIX 8psk/qpsk/DCII USB2.0 DVB-S module * * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) * * Thanks to GENPIX for the sample code used to implement this module. * * This module is based off the vp7045 and vp702x modules */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "gp8psk-fe.h" #include <media/dvb_frontend.h> static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define dprintk(fmt, arg...)
do { \ if (debug) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##arg); \ } while (0) struct gp8psk_fe_state { struct dvb_frontend fe; void *priv; const struct gp8psk_fe_ops *ops; bool is_rev1; u8 lock; u16 snr; unsigned long next_status_check; unsigned long status_check_interval; }; static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe) { struct gp8psk_fe_state *st = fe->demodulator_priv; u8 status; st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1); return status & bmDCtuned; } static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode) { struct gp8psk_fe_state *st = fe->demodulator_priv; return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0); } static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) { u8 buf[6]; if (time_after(jiffies,st->next_status_check)) { st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1); st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6); st->snr = (buf[1]) << 8 | buf[0]; st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; } return 0; } static int gp8psk_fe_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct gp8psk_fe_state *st = fe->demodulator_priv; gp8psk_fe_update_status(st); if (st->lock) *status = FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI | FE_HAS_SIGNAL | FE_HAS_CARRIER; else *status = 0; if (*status & FE_HAS_LOCK) st->status_check_interval = 1000; else st->status_check_interval = 100; return 0; } /* not supported by this Frontend */ static int gp8psk_fe_read_ber(struct dvb_frontend* fe, u32 *ber) { (void) fe; *ber = 0; return 0; } /* not supported by this Frontend */ static int gp8psk_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) { (void) fe; *unc = 0; return 0; } static int gp8psk_fe_read_snr(struct dvb_frontend* fe, u16 *snr) { struct gp8psk_fe_state *st = fe->demodulator_priv; gp8psk_fe_update_status(st); /* snr is reported in dBu*256 */ *snr = st->snr; return 0; } static int gp8psk_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength) { struct gp8psk_fe_state *st = fe->demodulator_priv; gp8psk_fe_update_status(st); /* snr is reported in dBu*256 */ /* snr / 38.4 ~= 100% strength */ /* snr * 17 returns 100% strength as 65535 */ if (st->snr > 0xf00) *strength = 0xffff; else *strength = (st->snr << 4) + st->snr; /* snr*17 */ return 0; } static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 800; return 0; } static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) { struct gp8psk_fe_state *st = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 cmd[10]; u32 freq = c->frequency * 1000; dprintk("%s()\n", __func__); cmd[4] = freq & 0xff; cmd[5] = (freq >> 8) & 0xff; cmd[6] = (freq >> 16) & 0xff; cmd[7] = (freq >> 24) & 0xff; /* backwards compatibility: DVB-S + 8-PSK were used for Turbo-FEC */ if (c->delivery_system == SYS_DVBS && c->modulation == PSK_8) c->delivery_system = SYS_TURBO; switch (c->delivery_system) { case SYS_DVBS: if (c->modulation != QPSK) { dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } c->fec_inner = FEC_AUTO; break; case SYS_DVBS2: /* kept for backwards compatibility */ dprintk("%s: DVB-S2 delivery system selected\n", __func__); break; case SYS_TURBO: dprintk("%s: Turbo-FEC delivery system selected\n", __func__); break; default: dprintk("%s: unsupported delivery system selected (%d)\n", __func__, c->delivery_system); return -EOPNOTSUPP; } cmd[0] = c->symbol_rate & 0xff; 
cmd[1] = (c->symbol_rate >> 8) & 0xff; cmd[2] = (c->symbol_rate >> 16) & 0xff; cmd[3] = (c->symbol_rate >> 24) & 0xff; switch (c->modulation) { case QPSK: if (st->is_rev1) if (gp8psk_tuned_to_DCII(fe)) st->ops->reload(st->priv); switch (c->fec_inner) { case FEC_1_2: cmd[9] = 0; break; case FEC_2_3: cmd[9] = 1; break; case FEC_3_4: cmd[9] = 2; break; case FEC_5_6: cmd[9] = 3; break; case FEC_7_8: cmd[9] = 4; break; case FEC_AUTO: cmd[9] = 5; break; default: cmd[9] = 5; break; } if (c->delivery_system == SYS_TURBO) cmd[8] = ADV_MOD_TURBO_QPSK; else cmd[8] = ADV_MOD_DVB_QPSK; break; case PSK_8: /* PSK_8 is for compatibility with DN */ cmd[8] = ADV_MOD_TURBO_8PSK; switch (c->fec_inner) { case FEC_2_3: cmd[9] = 0; break; case FEC_3_4: cmd[9] = 1; break; case FEC_3_5: cmd[9] = 2; break; case FEC_5_6: cmd[9] = 3; break; case FEC_8_9: cmd[9] = 4; break; default: cmd[9] = 0; break; } break; case QAM_16: /* QAM_16 is for compatibility with DN */ cmd[8] = ADV_MOD_TURBO_16QAM; cmd[9] = 0; break; default: /* Unknown modulation */ dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } if (st->is_rev1) gp8psk_set_tuner_mode(fe, 0); st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10); st->lock = 0; st->next_status_check = jiffies; st->status_check_interval = 200; return 0; } static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *m) { struct gp8psk_fe_state *st = fe->demodulator_priv; dprintk("%s\n", __func__); if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0, m->msg, m->msg_len)) { return -EINVAL; } return 0; } static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe, enum fe_sec_mini_cmd burst) { struct gp8psk_fe_state *st = fe->demodulator_priv; u8 cmd; dprintk("%s\n", __func__); /* These commands are certainly wrong */ cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01; if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0, &cmd, 0)) { return -EINVAL; } return 0; } static int gp8psk_fe_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone) { struct gp8psk_fe_state *st = fe->demodulator_priv; if (st->ops->out(st->priv, SET_22KHZ_TONE, (tone == SEC_TONE_ON), 0, NULL, 0)) { return -EINVAL; } return 0; } static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { struct gp8psk_fe_state *st = fe->demodulator_priv; if (st->ops->out(st->priv, SET_LNB_VOLTAGE, voltage == SEC_VOLTAGE_18, 0, NULL, 0)) { return -EINVAL; } return 0; } static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff) { struct gp8psk_fe_state *st = fe->demodulator_priv; return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0); } static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd) { struct gp8psk_fe_state *st = fe->demodulator_priv; u8 cmd = sw_cmd & 0x7f; if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0)) return -EINVAL; if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), 0, NULL, 0)) return -EINVAL; return 0; } static void gp8psk_fe_release(struct dvb_frontend* fe) { struct gp8psk_fe_state *st = fe->demodulator_priv; kfree(st); } static const struct dvb_frontend_ops gp8psk_fe_ops; struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, void *priv, bool is_rev1) { struct gp8psk_fe_state *st; if (!ops || !ops->in || !ops->out || !ops->reload) { pr_err("Error! 
gp8psk-fe ops not defined.\n"); return NULL; } st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); if (!st) return NULL; memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); st->fe.demodulator_priv = st; st->ops = ops; st->priv = priv; st->is_rev1 = is_rev1; pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : ""); return &st->fe; } EXPORT_SYMBOL_GPL(gp8psk_fe_attach); static const struct dvb_frontend_ops gp8psk_fe_ops = { .delsys = { SYS_DVBS }, .info = { .name = "Genpix DVB-S", .frequency_min_hz = 800 * MHz, .frequency_max_hz = 2250 * MHz, .frequency_stepsize_hz = 100 * kHz, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .symbol_rate_tolerance = 500, /* ppm */ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | /* * FE_CAN_QAM_16 is for compatibility * (Myth incorrectly detects Turbo-QPSK as plain QAM-16) */ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_TURBO_FEC }, .release = gp8psk_fe_release, .init = NULL, .sleep = NULL, .set_frontend = gp8psk_fe_set_frontend, .get_tune_settings = gp8psk_fe_get_tune_settings, .read_status = gp8psk_fe_read_status, .read_ber = gp8psk_fe_read_ber, .read_signal_strength = gp8psk_fe_read_signal_strength, .read_snr = gp8psk_fe_read_snr, .read_ucblocks = gp8psk_fe_read_unc_blocks, .diseqc_send_master_cmd = gp8psk_fe_send_diseqc_msg, .diseqc_send_burst = gp8psk_fe_send_diseqc_burst, .set_tone = gp8psk_fe_set_tone, .set_voltage = gp8psk_fe_set_voltage, .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage }; MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>"); MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S"); MODULE_VERSION("1.1"); MODULE_LICENSE("GPL");
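/*
 * Attach sketch (hypothetical callbacks, not part of gp8psk-fe.c): the
 * gp8psk_fe_ops member layout is assumed from the call sites above, where
 * in/out take (priv, request, value, index, buffer, length) and reload
 * takes (priv). A USB glue driver wires its control transfers in and hands
 * the ops to gp8psk_fe_attach() from its frontend-attach path.
 */
#include <linux/types.h>
#include "gp8psk-fe.h"

static int demo_gp8psk_in(void *priv, u8 req, u16 value, u16 index,
			  u8 *b, int blen)
{
	/* forward to usb_control_msg() on the device hidden behind @priv */
	return 0;
}

static int demo_gp8psk_out(void *priv, u8 req, u16 value, u16 index,
			   u8 *b, int blen)
{
	return 0;
}

static int demo_gp8psk_reload(void *priv)
{
	/* re-download firmware, as done above when leaving DCII for QPSK */
	return 0;
}

static const struct gp8psk_fe_ops demo_gp8psk_ops = {
	.in	= demo_gp8psk_in,
	.out	= demo_gp8psk_out,
	.reload	= demo_gp8psk_reload,
};

/* from the USB driver's frontend-attach path: */
/*	struct dvb_frontend *fe = gp8psk_fe_attach(&demo_gp8psk_ops, priv, is_rev1); */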
// SPDX-License-Identifier: GPL-2.0-only #include <net/netdev_queues.h> #include <net/sock.h> #include <linux/ethtool_netlink.h> #include <linux/phy_link_topology.h> #include <linux/pm_runtime.h> #include "netlink.h" #include "module_fw.h" static struct genl_family ethtool_genl_family; static bool ethnl_ok __read_mostly; static u32 ethnl_bcast_seq; #define ETHTOOL_FLAGS_BASIC (ETHTOOL_FLAG_COMPACT_BITSETS | \ ETHTOOL_FLAG_OMIT_REPLY) #define ETHTOOL_FLAGS_STATS (ETHTOOL_FLAGS_BASIC | ETHTOOL_FLAG_STATS) const struct nla_policy ethnl_header_policy[] = { [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, .len = ALTIFNAMSIZ - 1 }, [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, ETHTOOL_FLAGS_BASIC), }; const struct nla_policy ethnl_header_policy_stats[] = { [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, .len = ALTIFNAMSIZ - 1 }, [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, ETHTOOL_FLAGS_STATS), }; const struct nla_policy ethnl_header_policy_phy[] = { [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, .len = ALTIFNAMSIZ - 1 }, [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32, ETHTOOL_FLAGS_BASIC), [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1), }; const struct nla_policy ethnl_header_policy_phy_stats[] = { [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 }, [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING, .len = ALTIFNAMSIZ - 1 }, [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
ETHTOOL_FLAGS_STATS), [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1), }; int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid, enum ethnl_sock_type type) { struct ethnl_sock_priv *sk_priv; sk_priv = genl_sk_priv_get(&ethtool_genl_family, NETLINK_CB(skb).sk); if (IS_ERR(sk_priv)) return PTR_ERR(sk_priv); sk_priv->dev = dev; sk_priv->portid = portid; sk_priv->type = type; return 0; } static void ethnl_sock_priv_destroy(void *priv) { struct ethnl_sock_priv *sk_priv = priv; switch (sk_priv->type) { case ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH: ethnl_module_fw_flash_sock_destroy(sk_priv); break; default: break; } } int ethnl_ops_begin(struct net_device *dev) { int ret; if (!dev) return -ENODEV; if (dev->dev.parent) pm_runtime_get_sync(dev->dev.parent); if (!netif_device_present(dev) || dev->reg_state >= NETREG_UNREGISTERING) { ret = -ENODEV; goto err; } if (dev->ethtool_ops->begin) { ret = dev->ethtool_ops->begin(dev); if (ret) goto err; } return 0; err: if (dev->dev.parent) pm_runtime_put(dev->dev.parent); return ret; } void ethnl_ops_complete(struct net_device *dev) { if (dev->ethtool_ops->complete) dev->ethtool_ops->complete(dev); if (dev->dev.parent) pm_runtime_put(dev->dev.parent); } /** * ethnl_parse_header_dev_get() - parse request header * @req_info: structure to put results into * @header: nest attribute with request header * @net: request netns * @extack: netlink extack for error reporting * @require_dev: fail if no device identified in header * * Parse request header in nested attribute @nest and puts results into * the structure pointed to by @req_info. Extack from @info is used for error * reporting. If req_info->dev is not null on return, reference to it has * been taken. If error is returned, *req_info is null initialized and no * reference is held. * * Return: 0 on success or negative error code */ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, const struct nlattr *header, struct net *net, struct netlink_ext_ack *extack, bool require_dev) { struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy_phy)]; const struct nlattr *devname_attr; struct net_device *dev = NULL; u32 flags = 0; int ret; if (!header) { if (!require_dev) return 0; NL_SET_ERR_MSG(extack, "request header missing"); return -EINVAL; } /* No validation here, command policy should have a nested policy set * for the header, therefore validation should have already been done. 
*/ ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy_phy) - 1, header, NULL, extack); if (ret < 0) return ret; if (tb[ETHTOOL_A_HEADER_FLAGS]) flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]); devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME]; if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) { u32 ifindex = nla_get_u32(tb[ETHTOOL_A_HEADER_DEV_INDEX]); dev = netdev_get_by_index(net, ifindex, &req_info->dev_tracker, GFP_KERNEL); if (!dev) { NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_HEADER_DEV_INDEX], "no device matches ifindex"); return -ENODEV; } /* if both ifindex and ifname are passed, they must match */ if (devname_attr && strncmp(dev->name, nla_data(devname_attr), IFNAMSIZ)) { netdev_put(dev, &req_info->dev_tracker); NL_SET_ERR_MSG_ATTR(extack, header, "ifindex and name do not match"); return -ENODEV; } } else if (devname_attr) { dev = netdev_get_by_name(net, nla_data(devname_attr), &req_info->dev_tracker, GFP_KERNEL); if (!dev) { NL_SET_ERR_MSG_ATTR(extack, devname_attr, "no device matches name"); return -ENODEV; } } else if (require_dev) { NL_SET_ERR_MSG_ATTR(extack, header, "neither ifindex nor name specified"); return -EINVAL; } if (tb[ETHTOOL_A_HEADER_PHY_INDEX]) { if (dev) { req_info->phy_index = nla_get_u32(tb[ETHTOOL_A_HEADER_PHY_INDEX]); } else { NL_SET_ERR_MSG_ATTR(extack, header, "phy_index set without a netdev"); return -EINVAL; } } req_info->dev = dev; req_info->flags = flags; return 0; } struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, const struct nlattr *header, struct netlink_ext_ack *extack) { struct phy_device *phydev; ASSERT_RTNL(); if (!req_info->dev) return NULL; if (!req_info->phy_index) return req_info->dev->phydev; phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index); if (!phydev) { NL_SET_ERR_MSG_ATTR(extack, header, "no phy matching phyindex"); return ERR_PTR(-ENODEV); } return phydev; } /** * ethnl_fill_reply_header() - Put common header into a reply message * @skb: skb with the message * @dev: network device to describe in header * @attrtype: attribute type to use for the nest * * Create a nested attribute with attributes describing given network device. * * Return: 0 on success, error value (-EMSGSIZE only) on error */ int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev, u16 attrtype) { struct nlattr *nest; if (!dev) return 0; nest = nla_nest_start(skb, attrtype); if (!nest) return -EMSGSIZE; if (nla_put_u32(skb, ETHTOOL_A_HEADER_DEV_INDEX, (u32)dev->ifindex) || nla_put_string(skb, ETHTOOL_A_HEADER_DEV_NAME, dev->name)) goto nla_put_failure; /* If more attributes are put into reply header, ethnl_header_size() * must be updated to account for them. 
*/ nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -EMSGSIZE; } /** * ethnl_reply_init() - Create skb for a reply and fill device identification * @payload: payload length (without netlink and genetlink header) * @dev: device the reply is about (may be null) * @cmd: ETHTOOL_MSG_* message type for reply * @hdr_attrtype: attribute type for common header * @info: genetlink info of the received packet we respond to * @ehdrp: place to store payload pointer returned by genlmsg_new() * * Return: pointer to allocated skb on success, NULL on error */ struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, u16 hdr_attrtype, struct genl_info *info, void **ehdrp) { struct sk_buff *skb; skb = genlmsg_new(payload, GFP_KERNEL); if (!skb) goto err; *ehdrp = genlmsg_put_reply(skb, info, &ethtool_genl_family, 0, cmd); if (!*ehdrp) goto err_free; if (dev) { int ret; ret = ethnl_fill_reply_header(skb, dev, hdr_attrtype); if (ret < 0) goto err_free; } return skb; err_free: nlmsg_free(skb); err: if (info) GENL_SET_ERR_MSG(info, "failed to setup reply message"); return NULL; } void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd) { return genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &ethtool_genl_family, 0, cmd); } void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd) { return genlmsg_put(skb, 0, ++ethnl_bcast_seq, &ethtool_genl_family, 0, cmd); } void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd) { return genlmsg_put(skb, portid, seq, &ethtool_genl_family, 0, cmd); } int ethnl_multicast(struct sk_buff *skb, struct net_device *dev) { return genlmsg_multicast_netns(&ethtool_genl_family, dev_net(dev), skb, 0, ETHNL_MCGRP_MONITOR, GFP_KERNEL); } /* GET request helpers */ /** * struct ethnl_dump_ctx - context structure for generic dumpit() callback * @ops: request ops of currently processed message type * @req_info: parsed request header of processed request * @reply_data: data needed to compose the reply * @pos_ifindex: saved iteration position - ifindex * * These parameters are kept in struct netlink_callback as context preserved * between iterations. They are initialized by ethnl_default_start() and used * in ethnl_default_dumpit() and ethnl_default_done(). 
*/ struct ethnl_dump_ctx { const struct ethnl_request_ops *ops; struct ethnl_req_info *req_info; struct ethnl_reply_data *reply_data; unsigned long pos_ifindex; }; static const struct ethnl_request_ops * ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_STRSET_GET] = &ethnl_strset_request_ops, [ETHTOOL_MSG_LINKINFO_GET] = &ethnl_linkinfo_request_ops, [ETHTOOL_MSG_LINKINFO_SET] = &ethnl_linkinfo_request_ops, [ETHTOOL_MSG_LINKMODES_GET] = &ethnl_linkmodes_request_ops, [ETHTOOL_MSG_LINKMODES_SET] = &ethnl_linkmodes_request_ops, [ETHTOOL_MSG_LINKSTATE_GET] = &ethnl_linkstate_request_ops, [ETHTOOL_MSG_DEBUG_GET] = &ethnl_debug_request_ops, [ETHTOOL_MSG_DEBUG_SET] = &ethnl_debug_request_ops, [ETHTOOL_MSG_WOL_GET] = &ethnl_wol_request_ops, [ETHTOOL_MSG_WOL_SET] = &ethnl_wol_request_ops, [ETHTOOL_MSG_FEATURES_GET] = &ethnl_features_request_ops, [ETHTOOL_MSG_PRIVFLAGS_GET] = &ethnl_privflags_request_ops, [ETHTOOL_MSG_PRIVFLAGS_SET] = &ethnl_privflags_request_ops, [ETHTOOL_MSG_RINGS_GET] = &ethnl_rings_request_ops, [ETHTOOL_MSG_RINGS_SET] = &ethnl_rings_request_ops, [ETHTOOL_MSG_CHANNELS_GET] = &ethnl_channels_request_ops, [ETHTOOL_MSG_CHANNELS_SET] = &ethnl_channels_request_ops, [ETHTOOL_MSG_COALESCE_GET] = &ethnl_coalesce_request_ops, [ETHTOOL_MSG_COALESCE_SET] = &ethnl_coalesce_request_ops, [ETHTOOL_MSG_PAUSE_GET] = &ethnl_pause_request_ops, [ETHTOOL_MSG_PAUSE_SET] = &ethnl_pause_request_ops, [ETHTOOL_MSG_EEE_GET] = &ethnl_eee_request_ops, [ETHTOOL_MSG_EEE_SET] = &ethnl_eee_request_ops, [ETHTOOL_MSG_FEC_GET] = &ethnl_fec_request_ops, [ETHTOOL_MSG_FEC_SET] = &ethnl_fec_request_ops, [ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops, [ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops, [ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops, [ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops, [ETHTOOL_MSG_MODULE_GET] = &ethnl_module_request_ops, [ETHTOOL_MSG_MODULE_SET] = &ethnl_module_request_ops, [ETHTOOL_MSG_PSE_GET] = &ethnl_pse_request_ops, [ETHTOOL_MSG_PSE_SET] = &ethnl_pse_request_ops, [ETHTOOL_MSG_RSS_GET] = &ethnl_rss_request_ops, [ETHTOOL_MSG_PLCA_GET_CFG] = &ethnl_plca_cfg_request_ops, [ETHTOOL_MSG_PLCA_SET_CFG] = &ethnl_plca_cfg_request_ops, [ETHTOOL_MSG_PLCA_GET_STATUS] = &ethnl_plca_status_request_ops, [ETHTOOL_MSG_MM_GET] = &ethnl_mm_request_ops, [ETHTOOL_MSG_MM_SET] = &ethnl_mm_request_ops, [ETHTOOL_MSG_TSCONFIG_GET] = &ethnl_tsconfig_request_ops, [ETHTOOL_MSG_TSCONFIG_SET] = &ethnl_tsconfig_request_ops, }; static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb) { return (struct ethnl_dump_ctx *)cb->ctx; } /** * ethnl_default_parse() - Parse request message * @req_info: pointer to structure to put data into * @info: genl_info from the request * @request_ops: struct request_ops for request type * @require_dev: fail if no device identified in header * * Parse universal request header and call request specific ->parse_request() * callback (if defined) to parse the rest of the message. 
* * Return: 0 on success or negative error code */ static int ethnl_default_parse(struct ethnl_req_info *req_info, const struct genl_info *info, const struct ethnl_request_ops *request_ops, bool require_dev) { struct nlattr **tb = info->attrs; int ret; ret = ethnl_parse_header_dev_get(req_info, tb[request_ops->hdr_attr], genl_info_net(info), info->extack, require_dev); if (ret < 0) return ret; if (request_ops->parse_request) { ret = request_ops->parse_request(req_info, tb, info->extack); if (ret < 0) return ret; } return 0; } /** * ethnl_init_reply_data() - Initialize reply data for GET request * @reply_data: pointer to embedded struct ethnl_reply_data * @ops: instance of struct ethnl_request_ops describing the layout * @dev: network device to initialize the reply for * * Fills the reply data part with zeros and sets the dev member. Must be called * before calling the ->fill_reply() callback (for each iteration when handling * dump requests). */ static void ethnl_init_reply_data(struct ethnl_reply_data *reply_data, const struct ethnl_request_ops *ops, struct net_device *dev) { memset(reply_data, 0, ops->reply_data_size); reply_data->dev = dev; } /* default ->doit() handler for GET type requests */ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) { struct ethnl_reply_data *reply_data = NULL; struct ethnl_req_info *req_info = NULL; const u8 cmd = info->genlhdr->cmd; const struct ethnl_request_ops *ops; int hdr_len, reply_len; struct sk_buff *rskb; void *reply_payload; int ret; ops = ethnl_default_requests[cmd]; if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", cmd)) return -EOPNOTSUPP; if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr)) return -EINVAL; req_info = kzalloc(ops->req_info_size, GFP_KERNEL); if (!req_info) return -ENOMEM; reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); if (!reply_data) { kfree(req_info); return -ENOMEM; } ret = ethnl_default_parse(req_info, info, ops, !ops->allow_nodev_do); if (ret < 0) goto err_dev; ethnl_init_reply_data(reply_data, ops, req_info->dev); rtnl_lock(); ret = ops->prepare_data(req_info, reply_data, info); rtnl_unlock(); if (ret < 0) goto err_cleanup; ret = ops->reply_size(req_info, reply_data); if (ret < 0) goto err_cleanup; reply_len = ret; ret = -ENOMEM; rskb = ethnl_reply_init(reply_len + ethnl_reply_header_size(), req_info->dev, ops->reply_cmd, ops->hdr_attr, info, &reply_payload); if (!rskb) goto err_cleanup; hdr_len = rskb->len; ret = ops->fill_reply(rskb, req_info, reply_data); if (ret < 0) goto err_msg; WARN_ONCE(rskb->len - hdr_len > reply_len, "ethnl cmd %d: calculated reply length %d, but consumed %d\n", cmd, reply_len, rskb->len - hdr_len); if (ops->cleanup_data) ops->cleanup_data(reply_data); genlmsg_end(rskb, reply_payload); netdev_put(req_info->dev, &req_info->dev_tracker); kfree(reply_data); kfree(req_info); return genlmsg_reply(rskb, info); err_msg: WARN_ONCE(ret == -EMSGSIZE, "calculated message payload length (%d) not sufficient\n", reply_len); nlmsg_free(rskb); err_cleanup: if (ops->cleanup_data) ops->cleanup_data(reply_data); err_dev: netdev_put(req_info->dev, &req_info->dev_tracker); kfree(reply_data); kfree(req_info); return ret; } static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, const struct ethnl_dump_ctx *ctx, const struct genl_info *info) { void *ehdr; int ret; ehdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &ethtool_genl_family, NLM_F_MULTI, ctx->ops->reply_cmd); if (!ehdr) return -EMSGSIZE; ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev); 
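	/* One dump iteration: gather this device's state under RTNL via
	 * ->prepare_data(), emit the common header and the type-specific
	 * attributes, then close the per-device message (or cancel it on
	 * error).
	 */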
rtnl_lock(); ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, info); rtnl_unlock(); if (ret < 0) goto out; ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr); if (ret < 0) goto out; ret = ctx->ops->fill_reply(skb, ctx->req_info, ctx->reply_data); out: if (ctx->ops->cleanup_data) ctx->ops->cleanup_data(ctx->reply_data); ctx->reply_data->dev = NULL; if (ret < 0) genlmsg_cancel(skb, ehdr); else genlmsg_end(skb, ehdr); return ret; } /* Default ->dumpit() handler for GET requests. */ static int ethnl_default_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); struct net *net = sock_net(skb->sk); struct net_device *dev; int ret = 0; rcu_read_lock(); for_each_netdev_dump(net, dev, ctx->pos_ifindex) { dev_hold(dev); rcu_read_unlock(); ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb)); rcu_read_lock(); dev_put(dev); if (ret < 0 && ret != -EOPNOTSUPP) { if (likely(skb->len)) ret = skb->len; break; } ret = 0; } rcu_read_unlock(); return ret; } /* generic ->start() handler for GET requests */ static int ethnl_default_start(struct netlink_callback *cb) { const struct genl_dumpit_info *info = genl_dumpit_info(cb); struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); struct ethnl_reply_data *reply_data; const struct ethnl_request_ops *ops; struct ethnl_req_info *req_info; struct genlmsghdr *ghdr; int ret; BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); ghdr = nlmsg_data(cb->nlh); ops = ethnl_default_requests[ghdr->cmd]; if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", ghdr->cmd)) return -EOPNOTSUPP; req_info = kzalloc(ops->req_info_size, GFP_KERNEL); if (!req_info) return -ENOMEM; reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); if (!reply_data) { ret = -ENOMEM; goto free_req_info; } ret = ethnl_default_parse(req_info, &info->info, ops, false); if (req_info->dev) { /* We ignore device specification in dump requests but as the * same parser as for non-dump (doit) requests is used, it * would take reference to the device if it finds one */ netdev_put(req_info->dev, &req_info->dev_tracker); req_info->dev = NULL; } if (ret < 0) goto free_reply_data; ctx->ops = ops; ctx->req_info = req_info; ctx->reply_data = reply_data; ctx->pos_ifindex = 0; return 0; free_reply_data: kfree(reply_data); free_req_info: kfree(req_info); return ret; } /* default ->done() handler for GET requests */ static int ethnl_default_done(struct netlink_callback *cb) { struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); kfree(ctx->reply_data); kfree(ctx->req_info); return 0; } static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info) { const struct ethnl_request_ops *ops; struct ethnl_req_info req_info = {}; const u8 cmd = info->genlhdr->cmd; struct net_device *dev; int ret; ops = ethnl_default_requests[cmd]; if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", cmd)) return -EOPNOTSUPP; if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr)) return -EINVAL; ret = ethnl_parse_header_dev_get(&req_info, info->attrs[ops->hdr_attr], genl_info_net(info), info->extack, true); if (ret < 0) return ret; if (ops->set_validate) { ret = ops->set_validate(&req_info, info); /* 0 means nothing to do */ if (ret <= 0) goto out_dev; } dev = req_info.dev; rtnl_lock(); dev->cfg_pending = kmemdup(dev->cfg, sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT); if (!dev->cfg_pending) { ret = -ENOMEM; goto out_tie_cfg; } ret = ethnl_ops_begin(dev); if (ret < 0) goto out_free_cfg; ret = ops->set(&req_info, info); if (ret < 0) goto out_ops; swap(dev->cfg, 
dev->cfg_pending); if (!ret) goto out_ops; ethtool_notify(dev, ops->set_ntf_cmd, NULL); ret = 0; out_ops: ethnl_ops_complete(dev); out_free_cfg: kfree(dev->cfg_pending); out_tie_cfg: dev->cfg_pending = dev->cfg; rtnl_unlock(); out_dev: ethnl_parse_header_dev_put(&req_info); return ret; } static const struct ethnl_request_ops * ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = { [ETHTOOL_MSG_LINKINFO_NTF] = &ethnl_linkinfo_request_ops, [ETHTOOL_MSG_LINKMODES_NTF] = &ethnl_linkmodes_request_ops, [ETHTOOL_MSG_DEBUG_NTF] = &ethnl_debug_request_ops, [ETHTOOL_MSG_WOL_NTF] = &ethnl_wol_request_ops, [ETHTOOL_MSG_FEATURES_NTF] = &ethnl_features_request_ops, [ETHTOOL_MSG_PRIVFLAGS_NTF] = &ethnl_privflags_request_ops, [ETHTOOL_MSG_RINGS_NTF] = &ethnl_rings_request_ops, [ETHTOOL_MSG_CHANNELS_NTF] = &ethnl_channels_request_ops, [ETHTOOL_MSG_COALESCE_NTF] = &ethnl_coalesce_request_ops, [ETHTOOL_MSG_PAUSE_NTF] = &ethnl_pause_request_ops, [ETHTOOL_MSG_EEE_NTF] = &ethnl_eee_request_ops, [ETHTOOL_MSG_FEC_NTF] = &ethnl_fec_request_ops, [ETHTOOL_MSG_MODULE_NTF] = &ethnl_module_request_ops, [ETHTOOL_MSG_PLCA_NTF] = &ethnl_plca_cfg_request_ops, [ETHTOOL_MSG_MM_NTF] = &ethnl_mm_request_ops, }; /* default notification handler */ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, const void *data) { struct ethnl_reply_data *reply_data; const struct ethnl_request_ops *ops; struct ethnl_req_info *req_info; struct genl_info info; struct sk_buff *skb; void *reply_payload; int reply_len; int ret; genl_info_init_ntf(&info, &ethtool_genl_family, cmd); if (WARN_ONCE(cmd > ETHTOOL_MSG_KERNEL_MAX || !ethnl_default_notify_ops[cmd], "unexpected notification type %u\n", cmd)) return; ops = ethnl_default_notify_ops[cmd]; req_info = kzalloc(ops->req_info_size, GFP_KERNEL); if (!req_info) return; reply_data = kmalloc(ops->reply_data_size, GFP_KERNEL); if (!reply_data) { kfree(req_info); return; } req_info->dev = dev; req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS; ethnl_init_reply_data(reply_data, ops, dev); ret = ops->prepare_data(req_info, reply_data, &info); if (ret < 0) goto err_cleanup; ret = ops->reply_size(req_info, reply_data); if (ret < 0) goto err_cleanup; reply_len = ret + ethnl_reply_header_size(); skb = genlmsg_new(reply_len, GFP_KERNEL); if (!skb) goto err_cleanup; reply_payload = ethnl_bcastmsg_put(skb, cmd); if (!reply_payload) goto err_skb; ret = ethnl_fill_reply_header(skb, dev, ops->hdr_attr); if (ret < 0) goto err_msg; ret = ops->fill_reply(skb, req_info, reply_data); if (ret < 0) goto err_msg; if (ops->cleanup_data) ops->cleanup_data(reply_data); genlmsg_end(skb, reply_payload); kfree(reply_data); kfree(req_info); ethnl_multicast(skb, dev); return; err_msg: WARN_ONCE(ret == -EMSGSIZE, "calculated message payload length (%d) not sufficient\n", reply_len); err_skb: nlmsg_free(skb); err_cleanup: if (ops->cleanup_data) ops->cleanup_data(reply_data); kfree(reply_data); kfree(req_info); return; } /* notifications */ typedef void (*ethnl_notify_handler_t)(struct net_device *dev, unsigned int cmd, const void *data); static const ethnl_notify_handler_t ethnl_notify_handlers[] = { [ETHTOOL_MSG_LINKINFO_NTF] = ethnl_default_notify, [ETHTOOL_MSG_LINKMODES_NTF] = ethnl_default_notify, [ETHTOOL_MSG_DEBUG_NTF] = ethnl_default_notify, [ETHTOOL_MSG_WOL_NTF] = ethnl_default_notify, [ETHTOOL_MSG_FEATURES_NTF] = ethnl_default_notify, [ETHTOOL_MSG_PRIVFLAGS_NTF] = ethnl_default_notify, [ETHTOOL_MSG_RINGS_NTF] = ethnl_default_notify, [ETHTOOL_MSG_CHANNELS_NTF] = ethnl_default_notify, 
[ETHTOOL_MSG_COALESCE_NTF] = ethnl_default_notify, [ETHTOOL_MSG_PAUSE_NTF] = ethnl_default_notify, [ETHTOOL_MSG_EEE_NTF] = ethnl_default_notify, [ETHTOOL_MSG_FEC_NTF] = ethnl_default_notify, [ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify, [ETHTOOL_MSG_PLCA_NTF] = ethnl_default_notify, [ETHTOOL_MSG_MM_NTF] = ethnl_default_notify, }; void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data) { if (unlikely(!ethnl_ok)) return; ASSERT_RTNL(); if (likely(cmd < ARRAY_SIZE(ethnl_notify_handlers) && ethnl_notify_handlers[cmd])) ethnl_notify_handlers[cmd](dev, cmd, data); else WARN_ONCE(1, "notification %u not implemented (dev=%s)\n", cmd, netdev_name(dev)); } EXPORT_SYMBOL(ethtool_notify); static void ethnl_notify_features(struct netdev_notifier_info *info) { struct net_device *dev = netdev_notifier_info_to_dev(info); ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL); } static int ethnl_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netdev_notifier_info *info = ptr; struct netlink_ext_ack *extack; struct net_device *dev; dev = netdev_notifier_info_to_dev(info); extack = netdev_notifier_info_to_extack(info); switch (event) { case NETDEV_FEAT_CHANGE: ethnl_notify_features(ptr); break; case NETDEV_PRE_UP: if (dev->ethtool->module_fw_flash_in_progress) { NL_SET_ERR_MSG(extack, "Can't set port up while flashing module firmware"); return NOTIFY_BAD; } } return NOTIFY_DONE; } static struct notifier_block ethnl_netdev_notifier = { .notifier_call = ethnl_netdev_event, }; /* genetlink setup */ static const struct genl_ops ethtool_genl_ops[] = { { .cmd = ETHTOOL_MSG_STRSET_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_strset_get_policy, .maxattr = ARRAY_SIZE(ethnl_strset_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_LINKINFO_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_linkinfo_get_policy, .maxattr = ARRAY_SIZE(ethnl_linkinfo_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_LINKINFO_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_linkinfo_set_policy, .maxattr = ARRAY_SIZE(ethnl_linkinfo_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_LINKMODES_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_linkmodes_get_policy, .maxattr = ARRAY_SIZE(ethnl_linkmodes_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_LINKMODES_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_linkmodes_set_policy, .maxattr = ARRAY_SIZE(ethnl_linkmodes_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_LINKSTATE_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_linkstate_get_policy, .maxattr = ARRAY_SIZE(ethnl_linkstate_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_DEBUG_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_debug_get_policy, .maxattr = ARRAY_SIZE(ethnl_debug_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_DEBUG_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_debug_set_policy, .maxattr = ARRAY_SIZE(ethnl_debug_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_WOL_GET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = 
ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_wol_get_policy, .maxattr = ARRAY_SIZE(ethnl_wol_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_WOL_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_wol_set_policy, .maxattr = ARRAY_SIZE(ethnl_wol_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_FEATURES_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_features_get_policy, .maxattr = ARRAY_SIZE(ethnl_features_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_FEATURES_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_set_features, .policy = ethnl_features_set_policy, .maxattr = ARRAY_SIZE(ethnl_features_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_PRIVFLAGS_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_privflags_get_policy, .maxattr = ARRAY_SIZE(ethnl_privflags_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_PRIVFLAGS_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_privflags_set_policy, .maxattr = ARRAY_SIZE(ethnl_privflags_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_RINGS_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_rings_get_policy, .maxattr = ARRAY_SIZE(ethnl_rings_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_RINGS_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_rings_set_policy, .maxattr = ARRAY_SIZE(ethnl_rings_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_CHANNELS_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_channels_get_policy, .maxattr = ARRAY_SIZE(ethnl_channels_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_CHANNELS_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_channels_set_policy, .maxattr = ARRAY_SIZE(ethnl_channels_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_COALESCE_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_coalesce_get_policy, .maxattr = ARRAY_SIZE(ethnl_coalesce_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_COALESCE_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_coalesce_set_policy, .maxattr = ARRAY_SIZE(ethnl_coalesce_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_PAUSE_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_pause_get_policy, .maxattr = ARRAY_SIZE(ethnl_pause_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_PAUSE_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_pause_set_policy, .maxattr = ARRAY_SIZE(ethnl_pause_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_EEE_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_eee_get_policy, .maxattr = ARRAY_SIZE(ethnl_eee_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_EEE_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_eee_set_policy, .maxattr = ARRAY_SIZE(ethnl_eee_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_TSINFO_GET, .doit = ethnl_default_doit, .start = ethnl_tsinfo_start, .dumpit = ethnl_tsinfo_dumpit, .done = ethnl_tsinfo_done, .policy = ethnl_tsinfo_get_policy, .maxattr = ARRAY_SIZE(ethnl_tsinfo_get_policy) - 1, }, { 
.cmd = ETHTOOL_MSG_CABLE_TEST_ACT, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_act_cable_test, .policy = ethnl_cable_test_act_policy, .maxattr = ARRAY_SIZE(ethnl_cable_test_act_policy) - 1, }, { .cmd = ETHTOOL_MSG_CABLE_TEST_TDR_ACT, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_act_cable_test_tdr, .policy = ethnl_cable_test_tdr_act_policy, .maxattr = ARRAY_SIZE(ethnl_cable_test_tdr_act_policy) - 1, }, { .cmd = ETHTOOL_MSG_TUNNEL_INFO_GET, .doit = ethnl_tunnel_info_doit, .start = ethnl_tunnel_info_start, .dumpit = ethnl_tunnel_info_dumpit, .policy = ethnl_tunnel_info_get_policy, .maxattr = ARRAY_SIZE(ethnl_tunnel_info_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_FEC_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_fec_get_policy, .maxattr = ARRAY_SIZE(ethnl_fec_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_FEC_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_fec_set_policy, .maxattr = ARRAY_SIZE(ethnl_fec_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_MODULE_EEPROM_GET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_module_eeprom_get_policy, .maxattr = ARRAY_SIZE(ethnl_module_eeprom_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_STATS_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_stats_get_policy, .maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_phc_vclocks_get_policy, .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_MODULE_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_module_get_policy, .maxattr = ARRAY_SIZE(ethnl_module_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_MODULE_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_module_set_policy, .maxattr = ARRAY_SIZE(ethnl_module_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_PSE_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_pse_get_policy, .maxattr = ARRAY_SIZE(ethnl_pse_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_PSE_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_pse_set_policy, .maxattr = ARRAY_SIZE(ethnl_pse_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_RSS_GET, .doit = ethnl_default_doit, .start = ethnl_rss_dump_start, .dumpit = ethnl_rss_dumpit, .policy = ethnl_rss_get_policy, .maxattr = ARRAY_SIZE(ethnl_rss_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_PLCA_GET_CFG, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_plca_get_cfg_policy, .maxattr = ARRAY_SIZE(ethnl_plca_get_cfg_policy) - 1, }, { .cmd = ETHTOOL_MSG_PLCA_SET_CFG, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_plca_set_cfg_policy, .maxattr = ARRAY_SIZE(ethnl_plca_set_cfg_policy) - 1, }, { .cmd = ETHTOOL_MSG_PLCA_GET_STATUS, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_plca_get_status_policy, .maxattr = 
ARRAY_SIZE(ethnl_plca_get_status_policy) - 1, }, { .cmd = ETHTOOL_MSG_MM_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_mm_get_policy, .maxattr = ARRAY_SIZE(ethnl_mm_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_MM_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_mm_set_policy, .maxattr = ARRAY_SIZE(ethnl_mm_set_policy) - 1, }, { .cmd = ETHTOOL_MSG_MODULE_FW_FLASH_ACT, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_act_module_fw_flash, .policy = ethnl_module_fw_flash_act_policy, .maxattr = ARRAY_SIZE(ethnl_module_fw_flash_act_policy) - 1, }, { .cmd = ETHTOOL_MSG_PHY_GET, .doit = ethnl_phy_doit, .start = ethnl_phy_start, .dumpit = ethnl_phy_dumpit, .done = ethnl_phy_done, .policy = ethnl_phy_get_policy, .maxattr = ARRAY_SIZE(ethnl_phy_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_TSCONFIG_GET, .doit = ethnl_default_doit, .start = ethnl_default_start, .dumpit = ethnl_default_dumpit, .done = ethnl_default_done, .policy = ethnl_tsconfig_get_policy, .maxattr = ARRAY_SIZE(ethnl_tsconfig_get_policy) - 1, }, { .cmd = ETHTOOL_MSG_TSCONFIG_SET, .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_default_set_doit, .policy = ethnl_tsconfig_set_policy, .maxattr = ARRAY_SIZE(ethnl_tsconfig_set_policy) - 1, }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { [ETHNL_MCGRP_MONITOR] = { .name = ETHTOOL_MCGRP_MONITOR_NAME }, }; static struct genl_family ethtool_genl_family __ro_after_init = { .name = ETHTOOL_GENL_NAME, .version = ETHTOOL_GENL_VERSION, .netnsok = true, .parallel_ops = true, .ops = ethtool_genl_ops, .n_ops = ARRAY_SIZE(ethtool_genl_ops), .resv_start_op = ETHTOOL_MSG_MODULE_GET + 1, .mcgrps = ethtool_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(ethtool_nl_mcgrps), .sock_priv_size = sizeof(struct ethnl_sock_priv), .sock_priv_destroy = ethnl_sock_priv_destroy, }; /* module setup */ static int __init ethnl_init(void) { int ret; ret = genl_register_family(&ethtool_genl_family); if (WARN(ret < 0, "ethtool: genetlink family registration failed")) return ret; ethnl_ok = true; ret = register_netdevice_notifier(&ethnl_netdev_notifier); WARN(ret < 0, "ethtool: net device notifier registration failed"); return ret; } subsys_initcall(ethnl_init);
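Most of the GET-type commands in ethtool_genl_ops[] above share ethnl_default_doit(), ethnl_default_start(), ethnl_default_dumpit() and ethnl_default_done(), and differ only in the struct ethnl_request_ops they register in ethnl_default_requests[]. The following sketch is illustrative only and is not part of the file above: the example_/EXAMPLE_ names and attribute numbers are invented, and it assumes the usual layout in which the per-type reply data embeds struct ethnl_reply_data as its first member so that ethnl_init_reply_data() can zero it and set ->dev. Only ops fields that actually appear in this file are filled in.

/* Hypothetical GET request type; example_/EXAMPLE_ identifiers are invented
 * for illustration and do not exist in the kernel.
 */
enum {
	EXAMPLE_A_HEADER	= 1,	/* hypothetical nested header attribute */
	EXAMPLE_A_VALUE		= 2,	/* hypothetical value attribute */
	EXAMPLE_MSG_GET_REPLY	= 0x42,	/* hypothetical reply command */
};

struct example_reply_data {
	struct ethnl_reply_data	base;	/* first member: zeroed and ->dev set by ethnl_init_reply_data() */
	u32			value;	/* gathered by ->prepare_data() */
};

static int example_prepare_data(const struct ethnl_req_info *req_info,
				struct ethnl_reply_data *reply_base,
				const struct genl_info *info)
{
	struct example_reply_data *data =
		container_of(reply_base, struct example_reply_data, base);

	/* Runs under RTNL from ethnl_default_doit()/ethnl_default_dump_one();
	 * read device state from reply_base->dev into the reply buffer.
	 */
	data->value = 0;	/* placeholder */
	return 0;
}

static int example_reply_size(const struct ethnl_req_info *req_info,
			      const struct ethnl_reply_data *reply_base)
{
	/* Upper bound on the attribute payload; ethnl_default_doit() warns
	 * if ->fill_reply() ends up emitting more than this.
	 */
	return nla_total_size(sizeof(u32));	/* EXAMPLE_A_VALUE */
}

static int example_fill_reply(struct sk_buff *skb,
			      const struct ethnl_req_info *req_info,
			      const struct ethnl_reply_data *reply_base)
{
	const struct example_reply_data *data =
		container_of(reply_base, struct example_reply_data, base);

	return nla_put_u32(skb, EXAMPLE_A_VALUE, data->value);
}

static const struct ethnl_request_ops example_request_ops = {
	.hdr_attr		= EXAMPLE_A_HEADER,
	.reply_cmd		= EXAMPLE_MSG_GET_REPLY,
	.req_info_size		= sizeof(struct ethnl_req_info),
	.reply_data_size	= sizeof(struct example_reply_data),
	.prepare_data		= example_prepare_data,
	.reply_size		= example_reply_size,
	.fill_reply		= example_fill_reply,
};

With such an ops structure hooked into ethnl_default_requests[] and a matching genl_ops entry pointing at the default doit/start/dumpit/done callbacks, the core code above takes care of header parsing, device lookup and refcounting, dump iteration and reply sizing; the per-type code only provides the three callbacks shown.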
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI core. 
*/ #include <linux/export.h> #include <linux/rfkill.h> #include <linux/debugfs.h> #include <linux/crypto.h> #include <linux/kcov.h> #include <linux/property.h> #include <linux/suspend.h> #include <linux/wait.h> #include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> #include "hci_debugfs.h" #include "smp.h" #include "leds.h" #include "msft.h" #include "aosp.h" #include "hci_codec.h" static void hci_rx_work(struct work_struct *work); static void hci_cmd_work(struct work_struct *work); static void hci_tx_work(struct work_struct *work); /* HCI device list */ LIST_HEAD(hci_dev_list); DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); /* Get HCI device by index. * Device is held on return. */ struct hci_dev *hci_dev_get(int index) { struct hci_dev *hdev = NULL, *d; BT_DBG("%d", index); if (index < 0) return NULL; read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); break; } } read_unlock(&hci_dev_list_lock); return hdev; } /* ---- Inquiry support ---- */ bool hci_discovery_active(struct hci_dev *hdev) { struct discovery_state *discov = &hdev->discovery; switch (discov->state) { case DISCOVERY_FINDING: case DISCOVERY_RESOLVING: return true; default: return false; } } void hci_discovery_set_state(struct hci_dev *hdev, int state) { int old_state = hdev->discovery.state; if (old_state == state) return; hdev->discovery.state = state; switch (state) { case DISCOVERY_STOPPED: hci_update_passive_scan(hdev); if (old_state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); break; case DISCOVERY_STARTING: break; case DISCOVERY_FINDING: mgmt_discovering(hdev, 1); break; case DISCOVERY_RESOLVING: break; case DISCOVERY_STOPPING: break; } bt_dev_dbg(hdev, "state %u -> %u", old_state, state); } void hci_inquiry_cache_flush(struct hci_dev *hdev) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *p, *n; list_for_each_entry_safe(p, n, &cache->all, all) { list_del(&p->all); kfree(p); } INIT_LIST_HEAD(&cache->unknown); INIT_LIST_HEAD(&cache->resolve); } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p, %pMR", cache, bdaddr); list_for_each_entry(e, &cache->all, all) { if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p, %pMR", cache, bdaddr); list_for_each_entry(e, &cache->unknown, list) { if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, bdaddr_t *bdaddr, int state) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); list_for_each_entry(e, &cache->resolve, list) { if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) return e; if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie) { struct discovery_state *cache = &hdev->discovery; struct list_head *pos = &cache->resolve; struct inquiry_entry *p; list_del(&ie->list); 
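	/* Re-insert the entry so the resolve list stays ordered by ascending
	 * |RSSI|, skipping entries whose name resolution is already pending
	 * (NAME_PENDING) so they keep their current position.
	 */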
list_for_each_entry(p, &cache->resolve, list) { if (p->name_state != NAME_PENDING && abs(p->data.rssi) >= abs(ie->data.rssi)) break; pos = &p->list; } list_add(&ie->list, pos); } u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *ie; u32 flags = 0; BT_DBG("cache %p, %pMR", cache, &data->bdaddr); hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); if (!data->ssp_mode) flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); if (ie) { if (!ie->data.ssp_mode) flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; if (ie->name_state == NAME_NEEDED && data->rssi != ie->data.rssi) { ie->data.rssi = data->rssi; hci_inquiry_cache_update_resolve(hdev, ie); } goto update; } /* Entry not in the cache. Add new one. */ ie = kzalloc(sizeof(*ie), GFP_KERNEL); if (!ie) { flags |= MGMT_DEV_FOUND_CONFIRM_NAME; goto done; } list_add(&ie->all, &cache->all); if (name_known) { ie->name_state = NAME_KNOWN; } else { ie->name_state = NAME_NOT_KNOWN; list_add(&ie->list, &cache->unknown); } update: if (name_known && ie->name_state != NAME_KNOWN && ie->name_state != NAME_PENDING) { ie->name_state = NAME_KNOWN; list_del(&ie->list); } memcpy(&ie->data, data, sizeof(*data)); ie->timestamp = jiffies; cache->timestamp = jiffies; if (ie->name_state == NAME_NOT_KNOWN) flags |= MGMT_DEV_FOUND_CONFIRM_NAME; done: return flags; } static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) { struct discovery_state *cache = &hdev->discovery; struct inquiry_info *info = (struct inquiry_info *) buf; struct inquiry_entry *e; int copied = 0; list_for_each_entry(e, &cache->all, all) { struct inquiry_data *data = &e->data; if (copied >= num) break; bacpy(&info->bdaddr, &data->bdaddr); info->pscan_rep_mode = data->pscan_rep_mode; info->pscan_period_mode = data->pscan_period_mode; info->pscan_mode = data->pscan_mode; memcpy(info->dev_class, data->dev_class, 3); info->clock_offset = data->clock_offset; info++; copied++; } BT_DBG("cache %p, copied %d", cache, copied); return copied; } int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) return -EFAULT; hdev = hci_dev_get(ir.dev_id); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } /* Restrict maximum inquiry length to 60 seconds */ if (ir.length > 60) { err = -EINVAL; goto done; } hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { hci_inquiry_cache_flush(hdev); do_inquiry = 1; } hci_dev_unlock(hdev); if (do_inquiry) { hci_req_sync_lock(hdev); err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp); hci_req_sync_unlock(hdev); if (err < 0) goto done; /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is * cleared). If it is interrupted by a signal, return -EINTR. */ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, TASK_INTERRUPTIBLE)) { err = -EINTR; goto done; } } /* for unlimited number of responses we will use buffer with * 255 entries */ max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; /* cache_dump can't sleep. Therefore we allocate temp buffer and then * copy it to the user space. 
*/ buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); if (!buf) { err = -ENOMEM; goto done; } hci_dev_lock(hdev); ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); hci_dev_unlock(hdev); BT_DBG("num_rsp %d", ir.num_rsp); if (!copy_to_user(ptr, &ir, sizeof(ir))) { ptr += sizeof(ir); if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * ir.num_rsp)) err = -EFAULT; } else err = -EFAULT; kfree(buf); done: hci_dev_put(hdev); return err; } static int hci_dev_do_open(struct hci_dev *hdev) { int ret = 0; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); ret = hci_dev_open_sync(hdev); hci_req_sync_unlock(hdev); return ret; } /* ---- HCI ioctl helpers ---- */ int hci_dev_open(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; /* Devices that are marked as unconfigured can only be powered * up as user channel. Trying to bring them up as normal devices * will result into a failure. Only user channel operation is * possible. * * When this function is called for a user channel, the flag * HCI_USER_CHANNEL will be set first before attempting to * open the device. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EOPNOTSUPP; goto done; } /* We need to ensure that no other power on/off work is pending * before proceeding to call hci_dev_do_open. This is * particularly important if the setup procedure has not yet * completed. */ if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); /* After this call it is guaranteed that the setup procedure * has finished. This means that error conditions like RFKILL * or no valid public or static random address apply. */ flush_workqueue(hdev->req_workqueue); /* For controllers not using the management interface and that * are brought up using legacy ioctl, set the HCI_BONDABLE bit * so that pairing works for them. Once the management interface * is in use this bit will be cleared again and userspace has * to explicitly enable it. */ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !hci_dev_test_flag(hdev, HCI_MGMT)) hci_dev_set_flag(hdev, HCI_BONDABLE); err = hci_dev_do_open(hdev); done: hci_dev_put(hdev); return err; } int hci_dev_do_close(struct hci_dev *hdev) { int err; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); err = hci_dev_close_sync(hdev); hci_req_sync_unlock(hdev); return err; } int hci_dev_close(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } cancel_work_sync(&hdev->power_on); if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); err = hci_dev_do_close(hdev); done: hci_dev_put(hdev); return err; } static int hci_dev_do_reset(struct hci_dev *hdev) { int ret; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); /* Cancel these to avoid queueing non-chained pending work */ hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); /* Wait for * * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) * queue_delayed_work(&hdev->{cmd,ncmd}_timer) * * inside RCU section to see the flag or complete scheduling. */ synchronize_rcu(); /* Explicitly cancel works in case scheduled after setting the flag. 
*/ cancel_delayed_work(&hdev->cmd_timer); cancel_delayed_work(&hdev->ncmd_timer); /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ drain_workqueue(hdev->workqueue); hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock(hdev); if (hdev->flush) hdev->flush(hdev); hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); atomic_set(&hdev->cmd_cnt, 1); hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; hdev->iso_cnt = 0; ret = hci_reset_sync(hdev); hci_req_sync_unlock(hdev); return ret; } int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; goto done; } if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } err = hci_dev_do_reset(hdev); done: hci_dev_put(hdev); return err; } int hci_dev_reset_stat(__u16 dev) { struct hci_dev *hdev; int ret = 0; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { ret = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { ret = -EOPNOTSUPP; goto done; } memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); done: hci_dev_put(hdev); return ret; } static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) { bool conn_changed, discov_changed; BT_DBG("%s scan 0x%02x", hdev->name, scan); if ((scan & SCAN_PAGE)) conn_changed = !hci_dev_test_and_set_flag(hdev, HCI_CONNECTABLE); else conn_changed = hci_dev_test_and_clear_flag(hdev, HCI_CONNECTABLE); if ((scan & SCAN_INQUIRY)) { discov_changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE); } else { hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); discov_changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE); } if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; if (conn_changed || discov_changed) { /* In case this was disabled through mgmt */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) hci_update_adv_data(hdev, hdev->cur_adv_instance); mgmt_new_settings(hdev); } } int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; __le16 policy; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) return -EFAULT; hdev = hci_dev_get(dr.dev_id); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } switch (cmd) { case HCISETAUTH: err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETENCRYPT: if (!lmp_encrypt_capable(hdev)) { err = -EOPNOTSUPP; break; } if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &dr.dev_opt, HCI_CMD_TIMEOUT); if (err) break; } err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETSCAN: err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &dr.dev_opt, HCI_CMD_TIMEOUT); /* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change. 
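 *
 * hci_update_passive_scan_state() above maps SCAN_PAGE onto the
 * HCI_CONNECTABLE flag and SCAN_INQUIRY onto HCI_DISCOVERABLE, and
 * announces the result with mgmt_new_settings() when the management
 * interface is in use and something actually changed.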
*/ if (!err) hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: policy = cpu_to_le16(dr.dev_opt); err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy, HCI_CMD_TIMEOUT); break; case HCISETLINKMODE: hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT); break; case HCISETPTYPE: if (hdev->pkt_type == (__u16) dr.dev_opt) break; hdev->pkt_type = (__u16) dr.dev_opt; mgmt_phy_configuration_changed(hdev, NULL); break; case HCISETACLMTU: hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); break; case HCISETSCOMTU: hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); break; default: err = -EINVAL; break; } done: hci_dev_put(hdev); return err; } int hci_get_dev_list(void __user *arg) { struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; int n = 0, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) return -EFAULT; if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; dl->dev_num = dev_num; dr = dl->dev_req; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { unsigned long flags = hdev->flags; /* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down. */ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); dr[n].dev_id = hdev->id; dr[n].dev_opt = flags; if (++n >= dev_num) break; } read_unlock(&hci_dev_list_lock); dl->dev_num = n; err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); kfree(dl); return err ? -EFAULT : 0; } int hci_get_dev_info(void __user *arg) { struct hci_dev *hdev; struct hci_dev_info di; unsigned long flags; int err = 0; if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT; hdev = hci_dev_get(di.dev_id); if (!hdev) return -ENODEV; /* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down. 
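 *
 * hci_get_dev_list() above applies the same HCI_UP masking per device,
 * so both ioctls report a consistent power state while an auto power
 * off is still pending.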
*/ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags = hdev->flags & ~BIT(HCI_UP); else flags = hdev->flags; strscpy(di.name, hdev->name, sizeof(di.name)); di.bdaddr = hdev->bdaddr; di.type = (hdev->bus & 0x0f); di.flags = flags; di.pkt_type = hdev->pkt_type; if (lmp_bredr_capable(hdev)) { di.acl_mtu = hdev->acl_mtu; di.acl_pkts = hdev->acl_pkts; di.sco_mtu = hdev->sco_mtu; di.sco_pkts = hdev->sco_pkts; } else { di.acl_mtu = hdev->le_mtu; di.acl_pkts = hdev->le_pkts; di.sco_mtu = 0; di.sco_pkts = 0; } di.link_policy = hdev->link_policy; di.link_mode = hdev->link_mode; memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); memcpy(&di.features, &hdev->features, sizeof(di.features)); if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; hci_dev_put(hdev); return err; } /* ---- Interface to HCI drivers ---- */ static int hci_dev_do_poweroff(struct hci_dev *hdev) { int err; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); err = hci_set_powered_sync(hdev, false); hci_req_sync_unlock(hdev); return err; } static int hci_rfkill_set_block(void *data, bool blocked) { struct hci_dev *hdev = data; int err; BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED)) return 0; if (blocked) { hci_dev_set_flag(hdev, HCI_RFKILLED); if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { err = hci_dev_do_poweroff(hdev); if (err) { bt_dev_err(hdev, "Error when powering off device on rfkill (%d)", err); /* Make sure the device is still closed even if * anything during power off sequence (eg. * disconnecting devices) failed. */ hci_dev_do_close(hdev); } } } else { hci_dev_clear_flag(hdev, HCI_RFKILLED); } return 0; } static const struct rfkill_ops hci_rfkill_ops = { .set_block = hci_rfkill_set_block, }; static void hci_power_on(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); int err; BT_DBG("%s", hdev->name); if (test_bit(HCI_UP, &hdev->flags) && hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); err = hci_powered_update_sync(hdev); mgmt_power_on(hdev, err); return; } err = hci_dev_do_open(hdev); if (err < 0) { hci_dev_lock(hdev); mgmt_set_powered_failed(hdev, err); hci_dev_unlock(hdev); return; } /* During the HCI setup phase, a few error conditions are * ignored and they need to be checked now. If they are still * valid, it is important to turn the device back off. */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_do_close(hdev); } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); } if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) set_bit(HCI_RAW, &hdev->flags); /* For fully configured devices, this will send * the Index Added event. For unconfigured devices, * it will send Unconfigued Index Added event. * * Devices with HCI_QUIRK_RAW_DEVICE are ignored * and no event will be send. 
*/ mgmt_index_added(hdev); } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* When the controller is now configured, then it * is important to clear the HCI_RAW flag. */ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) clear_bit(HCI_RAW, &hdev->flags); /* Powering on the controller with HCI_CONFIG set only * happens with the transition from unconfigured to * configured. This will send the Index Added event. */ mgmt_index_added(hdev); } } static void hci_power_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_off.work); BT_DBG("%s", hdev->name); hci_dev_do_close(hdev); } static void hci_error_reset(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); hci_dev_hold(hdev); BT_DBG("%s", hdev->name); if (hdev->hw_error) hdev->hw_error(hdev, hdev->hw_error_code); else bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); if (!hci_dev_do_close(hdev)) hci_dev_do_open(hdev); hci_dev_put(hdev); } void hci_uuids_clear(struct hci_dev *hdev) { struct bt_uuid *uuid, *tmp; list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { list_del(&uuid->list); kfree(uuid); } } void hci_link_keys_clear(struct hci_dev *hdev) { struct link_key *key, *tmp; list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } } void hci_smp_ltks_clear(struct hci_dev *hdev) { struct smp_ltk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } void hci_smp_irks_clear(struct hci_dev *hdev) { struct smp_irk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } void hci_blocked_keys_clear(struct hci_dev *hdev) { struct blocked_key *b, *tmp; list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { list_del_rcu(&b->list); kfree_rcu(b, rcu); } } bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) { bool blocked = false; struct blocked_key *b; rcu_read_lock(); list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { blocked = true; break; } } rcu_read_unlock(); return blocked; } struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *k; rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->link_keys, list) { if (bacmp(bdaddr, &k->bdaddr) == 0) { rcu_read_unlock(); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LINKKEY, k->val)) { bt_dev_warn_ratelimited(hdev, "Link key blocked for %pMR", &k->bdaddr); return NULL; } return k; } } rcu_read_unlock(); return NULL; } static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) return true; /* Debug keys are insecure so don't store them persistently */ if (key_type == HCI_LK_DEBUG_COMBINATION) return false; /* Changed combination key and there's no previous one */ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) return false; /* Security mode 3 case */ if (!conn) return true; /* BR/EDR key derived using SC from an LE link */ if (conn->type == LE_LINK) return true; /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) return true; /* Local side had dedicated bonding as requirement */ if (conn->auth_type == 0x02 || conn->auth_type == 0x03) return true; /* Remote side had dedicated bonding as requirement 
*/ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) return true; /* If none of the above criteria match, then don't store the key * persistently */ return false; } static u8 ltk_role(u8 type) { if (type == SMP_LTK) return HCI_ROLE_MASTER; return HCI_ROLE_SLAVE; } struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 role) { struct smp_ltk *k; rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) continue; if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { rcu_read_unlock(); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, k->val)) { bt_dev_warn_ratelimited(hdev, "LTK blocked for %pMR", &k->bdaddr); return NULL; } return k; } } rcu_read_unlock(); return NULL; } struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) { struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (!bacmp(&irk->rpa, rpa)) { irk_to_return = irk; goto done; } } list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); irk_to_return = irk; goto done; } } done: if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", &irk_to_return->bdaddr); irk_to_return = NULL; } rcu_read_unlock(); return irk_to_return; } struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; /* Identity Address must be public or static random */ if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) return NULL; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (addr_type == irk->addr_type && bacmp(bdaddr, &irk->bdaddr) == 0) { irk_to_return = irk; goto done; } } done: if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", &irk_to_return->bdaddr); irk_to_return = NULL; } rcu_read_unlock(); return irk_to_return; } struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent) { struct link_key *key, *old_key; u8 old_key_type; old_key = hci_find_link_key(hdev, bdaddr); if (old_key) { old_key_type = old_key->type; key = old_key; } else { old_key_type = conn ? 
conn->key_type : 0xff; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; list_add_rcu(&key->list, &hdev->link_keys); } BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); /* Some buggy controller combinations generate a changed * combination key for legacy pairing even when there's no * previous key */ if (type == HCI_LK_CHANGED_COMBINATION && (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { type = HCI_LK_COMBINATION; if (conn) conn->key_type = type; } bacpy(&key->bdaddr, bdaddr); memcpy(key->val, val, HCI_LINK_KEY_SIZE); key->pin_len = pin_len; if (type == HCI_LK_CHANGED_COMBINATION) key->type = old_key_type; else key->type = type; if (persistent) *persistent = hci_persistent_key(hdev, conn, type, old_key_type); return key; } struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) { struct smp_ltk *key, *old_key; u8 role = ltk_role(type); old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); if (old_key) key = old_key; else { key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; list_add_rcu(&key->list, &hdev->long_term_keys); } bacpy(&key->bdaddr, bdaddr); key->bdaddr_type = addr_type; memcpy(key->val, tk, sizeof(key->val)); key->authenticated = authenticated; key->ediv = ediv; key->rand = rand; key->enc_size = enc_size; key->type = type; return key; } struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 val[16], bdaddr_t *rpa) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); if (!irk) { irk = kzalloc(sizeof(*irk), GFP_KERNEL); if (!irk) return NULL; bacpy(&irk->bdaddr, bdaddr); irk->addr_type = addr_type; list_add_rcu(&irk->list, &hdev->identity_resolving_keys); } memcpy(irk->val, val, 16); bacpy(&irk->rpa, rpa); return irk; } int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *key; key = hci_find_link_key(hdev, bdaddr); if (!key) return -ENOENT; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&key->list); kfree_rcu(key, rcu); return 0; } int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct smp_ltk *k, *tmp; int removed = 0; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&k->list); kfree_rcu(k, rcu); removed++; } return removed ? 
0 : -ENOENT; } void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct smp_irk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&k->list); kfree_rcu(k, rcu); } } bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { struct smp_ltk *k; struct smp_irk *irk; u8 addr_type; if (type == BDADDR_BREDR) { if (hci_find_link_key(hdev, bdaddr)) return true; return false; } /* Convert to HCI addr type which struct smp_ltk uses */ if (type == BDADDR_LE_PUBLIC) addr_type = ADDR_LE_DEV_PUBLIC; else addr_type = ADDR_LE_DEV_RANDOM; irk = hci_get_irk(hdev, bdaddr, addr_type); if (irk) { bdaddr = &irk->bdaddr; addr_type = irk->addr_type; } rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } /* HCI command timer function */ static void hci_cmd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_timer.work); if (hdev->req_skb) { u16 opcode = hci_skb_opcode(hdev->req_skb); bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT); } else { bt_dev_err(hdev, "command tx timeout"); } if (hdev->reset) hdev->reset(hdev); atomic_set(&hdev->cmd_cnt, 1); queue_work(hdev->workqueue, &hdev->cmd_work); } /* HCI ncmd timer function */ static void hci_ncmd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, ncmd_timer.work); bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); /* During HCI_INIT phase no events can be injected if the ncmd timer * triggers since the procedure has its own timeout handling. 
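 *
 * Outside of HCI_INIT the recovery below goes through hci_reset_dev(),
 * which injects a synthetic HCI_EV_HARDWARE_ERROR frame via
 * hci_recv_frame(); handling of that event ends up in hci_error_reset()
 * above, closing and re-opening the device.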
*/ if (test_bit(HCI_INIT, &hdev->flags)) return; /* This is an irrecoverable state, inject hardware error event */ hci_reset_dev(hdev); } struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; list_for_each_entry(data, &hdev->remote_oob_data, list) { if (bacmp(bdaddr, &data->bdaddr) != 0) continue; if (data->bdaddr_type != bdaddr_type) continue; return data; } return NULL; } int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) return -ENOENT; BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); list_del(&data->list); kfree(data); return 0; } void hci_remote_oob_data_clear(struct hci_dev *hdev) { struct oob_data *data, *n; list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { list_del(&data->list); kfree(data); } } int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256) { struct oob_data *data; data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) { data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; bacpy(&data->bdaddr, bdaddr); data->bdaddr_type = bdaddr_type; list_add(&data->list, &hdev->remote_oob_data); } if (hash192 && rand192) { memcpy(data->hash192, hash192, sizeof(data->hash192)); memcpy(data->rand192, rand192, sizeof(data->rand192)); if (hash256 && rand256) data->present = 0x03; } else { memset(data->hash192, 0, sizeof(data->hash192)); memset(data->rand192, 0, sizeof(data->rand192)); if (hash256 && rand256) data->present = 0x02; else data->present = 0x00; } if (hash256 && rand256) { memcpy(data->hash256, hash256, sizeof(data->hash256)); memcpy(data->rand256, rand256, sizeof(data->rand256)); } else { memset(data->hash256, 0, sizeof(data->hash256)); memset(data->rand256, 0, sizeof(data->rand256)); if (hash192 && rand192) data->present = 0x01; } BT_DBG("%s for %pMR", hdev->name, bdaddr); return 0; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; list_for_each_entry(adv_instance, &hdev->adv_instances, list) { if (adv_instance->instance == instance) return adv_instance; } return NULL; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *cur_instance; cur_instance = hci_find_adv_instance(hdev, instance); if (!cur_instance) return NULL; if (cur_instance == list_last_entry(&hdev->adv_instances, struct adv_info, list)) return list_first_entry(&hdev->adv_instances, struct adv_info, list); else return list_next_entry(cur_instance, list); } /* This function requires the caller holds hdev->lock */ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -ENOENT; BT_DBG("%s removing %dMR", hdev->name, instance); if (hdev->cur_adv_instance == instance) { if (hdev->adv_instance_timeout) { cancel_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } hdev->cur_adv_instance = 0x00; } cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); hdev->adv_instance_cnt--; return 0; } void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) { struct 
adv_info *adv_instance, *n; list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) adv_instance->rpa_expired = rpa_expired; } /* This function requires the caller holds hdev->lock */ void hci_adv_instances_clear(struct hci_dev *hdev) { struct adv_info *adv_instance, *n; if (hdev->adv_instance_timeout) { disable_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { disable_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); } hdev->adv_instance_cnt = 0; hdev->cur_adv_instance = 0x00; } static void adv_instance_rpa_expired(struct work_struct *work) { struct adv_info *adv_instance = container_of(work, struct adv_info, rpa_expired_cb.work); BT_DBG(""); adv_instance->rpa_expired = true; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration, s8 tx_power, u32 min_interval, u32 max_interval, u8 mesh_handle) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); if (adv) { memset(adv->adv_data, 0, sizeof(adv->adv_data)); memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); } else { if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || instance < 1 || instance > hdev->le_num_of_adv_sets + 1) return ERR_PTR(-EOVERFLOW); adv = kzalloc(sizeof(*adv), GFP_KERNEL); if (!adv) return ERR_PTR(-ENOMEM); adv->pending = true; adv->instance = instance; /* If controller support only one set and the instance is set to * 1 then there is no option other than using handle 0x00. */ if (hdev->le_num_of_adv_sets == 1 && instance == 1) adv->handle = 0x00; else adv->handle = instance; list_add(&adv->list, &hdev->adv_instances); hdev->adv_instance_cnt++; } adv->flags = flags; adv->min_interval = min_interval; adv->max_interval = max_interval; adv->tx_power = tx_power; /* Defining a mesh_handle changes the timing units to ms, * rather than seconds, and ties the instance to the requested * mesh_tx queue. 
*/ adv->mesh = mesh_handle; hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, scan_rsp_len, scan_rsp_data); adv->timeout = timeout; adv->remaining_time = timeout; if (duration == 0) adv->duration = hdev->def_multi_adv_rotation_duration; else adv->duration = duration; INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); BT_DBG("%s for %dMR", hdev->name, instance); return adv; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval) { struct adv_info *adv; adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, min_interval, max_interval, 0); if (IS_ERR(adv)) return adv; adv->periodic = true; adv->per_adv_data_len = data_len; if (data) memcpy(adv->per_adv_data, data, data_len); return adv; } /* This function requires the caller holds hdev->lock */ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); /* If advertisement doesn't exist, we can't modify its data */ if (!adv) return -ENOENT; if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { memset(adv->adv_data, 0, sizeof(adv->adv_data)); memcpy(adv->adv_data, adv_data, adv_data_len); adv->adv_data_len = adv_data_len; adv->adv_data_changed = true; } if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); adv->scan_rsp_len = scan_rsp_len; adv->scan_rsp_changed = true; } /* Mark as changed if there are flags which would affect it */ if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) adv->scan_rsp_changed = true; return 0; } /* This function requires the caller holds hdev->lock */ u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) { u32 flags; struct adv_info *adv; if (instance == 0x00) { /* Instance 0 always manages the "Tx Power" and "Flags" * fields */ flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting * corresponds to the "connectable" instance flag. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) flags |= MGMT_ADV_FLAG_CONNECTABLE; if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_DISCOV; return flags; } adv = hci_find_adv_instance(hdev, instance); /* Return 0 when we got an invalid instance identifier. */ if (!adv) return 0; return adv->flags; } bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) { struct adv_info *adv; /* Instance 0x00 always set local name */ if (instance == 0x00) return true; adv = hci_find_adv_instance(hdev, instance); if (!adv) return false; if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) return true; return adv->scan_rsp_len ? true : false; } /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { struct adv_monitor *monitor; int handle; idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) hci_free_adv_monitor(hdev, monitor); idr_destroy(&hdev->adv_monitors_idr); } /* Frees the monitor structure and do some bookkeepings. 
* This function requires the caller holds hdev->lock. */ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { struct adv_pattern *pattern; struct adv_pattern *tmp; if (!monitor) return; list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { list_del(&pattern->list); kfree(pattern); } if (monitor->handle) idr_remove(&hdev->adv_monitors_idr, monitor->handle); if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { hdev->adv_monitors_cnt--; mgmt_adv_monitor_removed(hdev, monitor->handle); } kfree(monitor); } /* Assigns handle to a monitor, and if offloading is supported and power is on, * also attempts to forward the request to the controller. * This function requires the caller holds hci_req_sync_lock. */ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int min, max, handle; int status = 0; if (!monitor) return -EINVAL; hci_dev_lock(hdev); min = HCI_MIN_ADV_MONITOR_HANDLE; max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, GFP_KERNEL); hci_dev_unlock(hdev); if (handle < 0) return handle; monitor->handle = handle; if (!hdev_is_powered(hdev)) return status; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: bt_dev_dbg(hdev, "add monitor %d status %d", monitor->handle, status); /* Message was not forwarded to controller - not an error */ break; case HCI_ADV_MONITOR_EXT_MSFT: status = msft_add_monitor_pattern(hdev, monitor); bt_dev_dbg(hdev, "add monitor %d msft status %d", handle, status); break; } return status; } /* Attempts to tell the controller and free the monitor. If somehow the * controller doesn't have a corresponding handle, remove anyway. * This function requires the caller holds hci_req_sync_lock. 
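 *
 * Concretely: an -ENOENT status from the MSFT offload path is only
 * logged as a warning and the monitor is still released through
 * hci_free_adv_monitor(), so its IDR entry never leaks.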
*/ static int hci_remove_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int status = 0; int handle; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ bt_dev_dbg(hdev, "remove monitor %d status %d", monitor->handle, status); goto free_monitor; case HCI_ADV_MONITOR_EXT_MSFT: handle = monitor->handle; status = msft_remove_monitor(hdev, monitor); bt_dev_dbg(hdev, "remove monitor %d msft status %d", handle, status); break; } /* In case no matching handle registered, just free the monitor */ if (status == -ENOENT) goto free_monitor; return status; free_monitor: if (status == -ENOENT) bt_dev_warn(hdev, "Removing monitor with no matching handle %d", monitor->handle); hci_free_adv_monitor(hdev, monitor); return status; } /* This function requires the caller holds hci_req_sync_lock */ int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) { struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); if (!monitor) return -EINVAL; return hci_remove_adv_monitor(hdev, monitor); } /* This function requires the caller holds hci_req_sync_lock */ int hci_remove_all_adv_monitor(struct hci_dev *hdev) { struct adv_monitor *monitor; int idr_next_id = 0; int status = 0; while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); if (!monitor) break; status = hci_remove_adv_monitor(hdev, monitor); if (status) return status; idr_next_id++; } return status; } /* This function requires the caller holds hdev->lock */ bool hci_is_adv_monitoring(struct hci_dev *hdev) { return !idr_is_empty(&hdev->adv_monitors_idr); } int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) { if (msft_monitor_supported(hdev)) return HCI_ADV_MONITOR_EXT_MSFT; return HCI_ADV_MONITOR_EXT_NONE; } struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_irk *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } struct bdaddr_list_with_flags * hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_flags *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; list_for_each_entry_safe(b, n, bdaddr_list, list) { list_del(&b->list); kfree(b); } } int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; list_add(&entry->list, list); return 0; } int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type, u8 *peer_irk, u8 *local_irk) { struct bdaddr_list_with_irk *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, 
bdaddr); entry->bdaddr_type = type; if (peer_irk) memcpy(entry->peer_irk, peer_irk, 16); if (local_irk) memcpy(entry->local_irk, local_irk, 16); list_add(&entry->list, list); return 0; } int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type, u32 flags) { struct bdaddr_list_with_flags *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; entry->flags = flags; list_add(&entry->list, list); return 0; } int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_irk *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; list_for_each_entry(params, &hdev->le_conn_params, list) { if (bacmp(&params->addr, addr) == 0 && params->addr_type == addr_type) { return params; } } return NULL; } /* This function requires the caller holds hdev->lock or rcu_read_lock */ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *param; rcu_read_lock(); list_for_each_entry_rcu(param, list, action) { if (bacmp(&param->addr, addr) == 0 && param->addr_type == addr_type) { rcu_read_unlock(); return param; } } rcu_read_unlock(); return NULL; } /* This function requires the caller holds hdev->lock */ void hci_pend_le_list_del_init(struct hci_conn_params *param) { if (list_empty(&param->action)) return; list_del_rcu(&param->action); synchronize_rcu(); INIT_LIST_HEAD(&param->action); } /* This function requires the caller holds hdev->lock */ void hci_pend_le_list_add(struct hci_conn_params *param, struct list_head *list) { list_add_rcu(&param->action, list); } /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; params = hci_conn_params_lookup(hdev, addr, addr_type); if (params) return params; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) { bt_dev_err(hdev, "out of memory"); return NULL; } bacpy(&params->addr, addr); params->addr_type = addr_type; list_add(&params->list, &hdev->le_conn_params); INIT_LIST_HEAD(&params->action); params->conn_min_interval = hdev->le_conn_min_interval; params->conn_max_interval = hdev->le_conn_max_interval; params->conn_latency = hdev->le_conn_latency; params->supervision_timeout = hdev->le_supv_timeout; params->auto_connect = HCI_AUTO_CONN_DISABLED; BT_DBG("addr %pMR (type %u)", addr, addr_type); return params; } void hci_conn_params_free(struct hci_conn_params *params) { hci_pend_le_list_del_init(params); if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); } list_del(&params->list); kfree(params); } /* This function 
requires the caller holds hdev->lock */ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; params = hci_conn_params_lookup(hdev, addr, addr_type); if (!params) return; hci_conn_params_free(params); hci_update_passive_scan(hdev); BT_DBG("addr %pMR (type %u)", addr, addr_type); } /* This function requires the caller holds hdev->lock */ void hci_conn_params_clear_disabled(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { if (params->auto_connect != HCI_AUTO_CONN_DISABLED) continue; /* If trying to establish one time connection to disabled * device, leave the params, but mark them as just once. */ if (params->explicit_connect) { params->auto_connect = HCI_AUTO_CONN_EXPLICIT; continue; } hci_conn_params_free(params); } BT_DBG("All LE disabled connection parameters were removed"); } /* This function requires the caller holds hdev->lock */ static void hci_conn_params_clear_all(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) hci_conn_params_free(params); BT_DBG("All LE connection parameters were removed"); } /* Copy the Identity Address of the controller. * * If the controller has a public BD_ADDR, then by default use that one. * If this is a LE only controller without a public address, default to * the static random address. * * For debugging purposes it is possible to force controllers with a * public address to use the static random address instead. * * In case BR/EDR has been disabled on a dual-mode controller and * userspace has configured a static address, then that address * becomes the identity address instead of the public BR/EDR address. */ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *bdaddr_type) { if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { bacpy(bdaddr, &hdev->static_addr); *bdaddr_type = ADDR_LE_DEV_RANDOM; } else { bacpy(bdaddr, &hdev->bdaddr); *bdaddr_type = ADDR_LE_DEV_PUBLIC; } } static void hci_clear_wake_reason(struct hci_dev *hdev) { hci_dev_lock(hdev); hdev->wake_reason = 0; bacpy(&hdev->wake_addr, BDADDR_ANY); hdev->wake_addr_type = 0; hci_dev_unlock(hdev); } static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct hci_dev *hdev = container_of(nb, struct hci_dev, suspend_notifier); int ret = 0; /* Userspace has full control of this device. Do nothing. */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return NOTIFY_DONE; /* To avoid a potential race with hci_unregister_dev. */ hci_dev_hold(hdev); switch (action) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: ret = hci_suspend_dev(hdev); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: ret = hci_resume_dev(hdev); break; } if (ret) bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", action, ret); hci_dev_put(hdev); return NOTIFY_DONE; } /* Alloc HCI device */ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) { struct hci_dev *hdev; unsigned int alloc_size; alloc_size = sizeof(*hdev); if (sizeof_priv) { /* Fixme: May need ALIGN-ment? 
*/ alloc_size += sizeof_priv; } hdev = kzalloc(alloc_size, GFP_KERNEL); if (!hdev) return NULL; hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); hdev->num_iac = 0x01; /* One IAC support is mandatory */ hdev->io_capability = 0x03; /* No Input No Output */ hdev->manufacturer = 0xffff; /* Default to internal use */ hdev->inq_tx_power = HCI_TX_POWER_INVALID; hdev->adv_tx_power = HCI_TX_POWER_INVALID; hdev->adv_instance_cnt = 0; hdev->cur_adv_instance = 0x00; hdev->adv_instance_timeout = 0; hdev->advmon_allowlist_duration = 300; hdev->advmon_no_filter_duration = 500; hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; hdev->le_adv_channel_map = 0x07; hdev->le_adv_min_interval = 0x0800; hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST; hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST; hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1; hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1; hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST; hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST; hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN; hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; hdev->le_supv_timeout = 0x002a; hdev->le_def_tx_len = 0x001b; hdev->le_def_tx_time = 0x0148; hdev->le_max_tx_len = 0x001b; hdev->le_max_tx_time = 0x0148; hdev->le_max_rx_len = 0x001b; hdev->le_max_rx_time = 0x0148; hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT; hdev->min_le_tx_power = HCI_TX_POWER_INVALID; hdev->max_le_tx_power = HCI_TX_POWER_INVALID; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; /* default 1.28 sec page scan */ hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; hdev->def_page_scan_int = 0x0800; hdev->def_page_scan_window = 0x0012; mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); ida_init(&hdev->unset_handle_ida); INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->reject_list); INIT_LIST_HEAD(&hdev->accept_list); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); INIT_LIST_HEAD(&hdev->le_accept_list); INIT_LIST_HEAD(&hdev->le_resolv_list); INIT_LIST_HEAD(&hdev->le_conn_params); INIT_LIST_HEAD(&hdev->pend_le_conns); INIT_LIST_HEAD(&hdev->pend_le_reports); INIT_LIST_HEAD(&hdev->conn_hash.list); INIT_LIST_HEAD(&hdev->adv_instances); INIT_LIST_HEAD(&hdev->blocked_keys); INIT_LIST_HEAD(&hdev->monitored_devices); INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); 
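/* Remaining deferred work set up below: TX scheduling, power on,
 * error reset, the synchronous command machinery, delayed power off
 * and the cmd/ncmd watchdog timers.
 */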
INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); hci_cmd_sync_init(hdev); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); skb_queue_head_init(&hdev->raw_q); init_waitqueue_head(&hdev->req_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); hci_devcd_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); return hdev; } EXPORT_SYMBOL(hci_alloc_dev_priv); /* Free HCI device */ void hci_free_dev(struct hci_dev *hdev) { /* will free via device release */ put_device(&hdev->dev); } EXPORT_SYMBOL(hci_free_dev); /* Register HCI device */ int hci_register_dev(struct hci_dev *hdev) { int id, error; if (!hdev->open || !hdev->close || !hdev->send) return -EINVAL; id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL); if (id < 0) return id; error = dev_set_name(&hdev->dev, "hci%u", id); if (error) return error; hdev->name = dev_name(&hdev->dev); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->workqueue) { error = -ENOMEM; goto err; } hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->req_workqueue) { destroy_workqueue(hdev->workqueue); error = -ENOMEM; goto err; } if (!IS_ERR_OR_NULL(bt_debugfs)) hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); error = device_add(&hdev->dev); if (error < 0) goto err_wqueue; hci_leds_init(hdev); hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); if (hdev->rfkill) { if (rfkill_register(hdev->rfkill) < 0) { rfkill_destroy(hdev->rfkill); hdev->rfkill = NULL; } } if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) hci_dev_set_flag(hdev, HCI_RFKILLED); hci_dev_set_flag(hdev, HCI_SETUP); hci_dev_set_flag(hdev, HCI_AUTO_OFF); /* Assume BR/EDR support until proven otherwise (such as * through reading supported features during init. */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); write_lock(&hci_dev_list_lock); list_add(&hdev->list, &hci_dev_list); write_unlock(&hci_dev_list_lock); /* Devices that are marked for raw-only usage are unconfigured * and should not be included in normal operation. */ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* Mark Remote Wakeup connection flag as supported if driver has wakeup * callback. 
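 *
 * Without a wakeup callback the remote-wakeup connection flag is never
 * offered as supported, which (as far as the management interface is
 * concerned) keeps userspace from marking individual devices as
 * wake-capable.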
*/ if (hdev->wakeup) hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); error = hci_register_suspend_notifier(hdev); if (error) BT_WARN("register suspend notifier failed error:%d\n", error); queue_work(hdev->req_workqueue, &hdev->power_on); idr_init(&hdev->adv_monitors_idr); msft_register(hdev); return id; err_wqueue: debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: ida_free(&hci_index_ida, hdev->id); return error; } EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); mutex_lock(&hdev->unregister_lock); hci_dev_set_flag(hdev, HCI_UNREGISTER); mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); disable_work_sync(&hdev->rx_work); disable_work_sync(&hdev->cmd_work); disable_work_sync(&hdev->tx_work); disable_work_sync(&hdev->power_on); disable_work_sync(&hdev->error_reset); hci_cmd_sync_clear(hdev); hci_unregister_suspend_notifier(hdev); hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && !hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); } /* mgmt_index_removed should take care of emptying the * pending list */ BUG_ON(!list_empty(&hdev->mgmt_pending)); hci_sock_dev_event(hdev, HCI_DEV_UNREG); if (hdev->rfkill) { rfkill_unregister(hdev->rfkill); rfkill_destroy(hdev->rfkill); } device_del(&hdev->dev); /* Actual cleanup is deferred until hci_release_dev(). */ hci_dev_put(hdev); } EXPORT_SYMBOL(hci_unregister_dev); /* Release HCI device */ void hci_release_dev(struct hci_dev *hdev) { debugfs_remove_recursive(hdev->debugfs); kfree_const(hdev->hw_info); kfree_const(hdev->fw_info); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->reject_list); hci_bdaddr_list_clear(&hdev->accept_list); hci_uuids_clear(hdev); hci_link_keys_clear(hdev); hci_smp_ltks_clear(hdev); hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); hci_adv_monitors_clear(hdev); hci_bdaddr_list_clear(&hdev->le_accept_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); hci_discovery_filter_clear(hdev); hci_blocked_keys_clear(hdev); hci_codec_list_clear(&hdev->local_codecs); msft_release(hdev); hci_dev_unlock(hdev); ida_destroy(&hdev->unset_handle_ida); ida_free(&hci_index_ida, hdev->id); kfree_skb(hdev->sent_cmd); kfree_skb(hdev->req_skb); kfree_skb(hdev->recv_event); kfree(hdev); } EXPORT_SYMBOL(hci_release_dev); int hci_register_suspend_notifier(struct hci_dev *hdev) { int ret = 0; if (!hdev->suspend_notifier.notifier_call && !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { hdev->suspend_notifier.notifier_call = hci_suspend_notifier; ret = register_pm_notifier(&hdev->suspend_notifier); } return ret; } int hci_unregister_suspend_notifier(struct hci_dev *hdev) { int ret = 0; if (hdev->suspend_notifier.notifier_call) { ret = unregister_pm_notifier(&hdev->suspend_notifier); if (!ret) hdev->suspend_notifier.notifier_call = NULL; } return ret; } /* Cancel ongoing command synchronously: * * - Cancel command timer * - Reset command counter * - Cancel command request */ static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", 
err); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { disable_delayed_work_sync(&hdev->cmd_timer); disable_delayed_work_sync(&hdev->ncmd_timer); } else { cancel_delayed_work_sync(&hdev->cmd_timer); cancel_delayed_work_sync(&hdev->ncmd_timer); } atomic_set(&hdev->cmd_cnt, 1); hci_cmd_sync_cancel_sync(hdev, err); } /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); /* Suspend should only act on when powered. */ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* If powering down don't attempt to suspend */ if (mgmt_powering_down(hdev)) return 0; /* Cancel potentially blocking sync operation before suspend */ hci_cancel_cmd_sync(hdev, EHOSTDOWN); hci_req_sync_lock(hdev); ret = hci_suspend_sync(hdev); hci_req_sync_unlock(hdev); hci_clear_wake_reason(hdev); mgmt_suspending(hdev, hdev->suspend_state); hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); return ret; } EXPORT_SYMBOL(hci_suspend_dev); /* Resume HCI device */ int hci_resume_dev(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); /* Resume should only act on when powered. */ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* If powering down don't attempt to resume */ if (mgmt_powering_down(hdev)) return 0; hci_req_sync_lock(hdev); ret = hci_resume_sync(hdev); hci_req_sync_unlock(hdev); mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, hdev->wake_addr_type); hci_sock_dev_event(hdev, HCI_DEV_RESUME); return ret; } EXPORT_SYMBOL(hci_resume_dev); /* Reset HCI device */ int hci_reset_dev(struct hci_dev *hdev) { static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; struct sk_buff *skb; skb = bt_skb_alloc(3, GFP_ATOMIC); if (!skb) return -ENOMEM; hci_skb_pkt_type(skb) = HCI_EVENT_PKT; skb_put_data(skb, hw_err, 3); bt_dev_err(hdev, "Injecting HCI hardware error event"); /* Send Hardware Error to upper stack */ return hci_recv_frame(hdev, skb); } EXPORT_SYMBOL(hci_reset_dev); static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) { if (hdev->classify_pkt_type) return hdev->classify_pkt_type(hdev, skb); return hci_skb_pkt_type(skb); } /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { u8 dev_pkt_type; if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } /* Check if the driver agree with packet type classification */ dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); if (hci_skb_pkt_type(skb) != dev_pkt_type) { hci_skb_pkt_type(skb) = dev_pkt_type; } switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: break; case HCI_ACLDATA_PKT: /* Detect if ISO packet has been sent as ACL */ if (hci_conn_num(hdev, ISO_LINK)) { __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); __u8 type; type = hci_conn_lookup_type(hdev, hci_handle(handle)); if (type == ISO_LINK) hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; } break; case HCI_SCODATA_PKT: break; case HCI_ISODATA_PKT: break; default: kfree_skb(skb); return -EINVAL; } /* Incoming skb */ bt_cb(skb)->incoming = 1; /* Time stamp */ __net_timestamp(skb); skb_queue_tail(&hdev->rx_q, skb); queue_work(hdev->workqueue, &hdev->rx_work); return 0; } EXPORT_SYMBOL(hci_recv_frame); /* Receive diagnostic message from HCI drivers */ int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) { /* Mark as diagnostic packet */ hci_skb_pkt_type(skb) = HCI_DIAG_PKT; /* Time stamp */ __net_timestamp(skb); skb_queue_tail(&hdev->rx_q, skb); 
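/* Same receive path as hci_recv_frame(): queue on rx_q and let
 * hci_rx_work process it on the per-device workqueue.
 */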
queue_work(hdev->workqueue, &hdev->rx_work); return 0; } EXPORT_SYMBOL(hci_recv_diag); void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) { va_list vargs; va_start(vargs, fmt); kfree_const(hdev->hw_info); hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); va_end(vargs); } EXPORT_SYMBOL(hci_set_hw_info); void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) { va_list vargs; va_start(vargs, fmt); kfree_const(hdev->fw_info); hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); va_end(vargs); } EXPORT_SYMBOL(hci_set_fw_info); /* ---- Interface to upper protocols ---- */ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); list_add_tail_rcu(&cb->list, &hci_cb_list); return 0; } EXPORT_SYMBOL(hci_register_cb); int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); list_del_rcu(&cb->list); synchronize_rcu(); return 0; } EXPORT_SYMBOL(hci_unregister_cb); static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { int err; BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); /* Time stamp */ __net_timestamp(skb); /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } /* Get rid of skb owner, prior to sending to the driver. */ skb_orphan(skb); if (!test_bit(HCI_RUNNING, &hdev->flags)) { kfree_skb(skb); return -EINVAL; } err = hdev->send(hdev, skb); if (err < 0) { bt_dev_err(hdev, "sending frame failed (%d)", err); kfree_skb(skb); return err; } return 0; } /* Send HCI command */ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param) { struct sk_buff *skb; BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command"); return -ENOMEM; } /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->hci.req_flags |= HCI_REQ_START; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); return 0; } int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param) { struct sk_buff *skb; if (hci_opcode_ogf(opcode) != 0x3f) { /* A controller receiving a command shall respond with either * a Command Status Event or a Command Complete Event. * Therefore, all standard HCI commands must be sent via the * standard API, using hci_send_cmd or hci_cmd_sync helpers. * Some vendors do not comply with this rule for vendor-specific * commands and do not return any event. We want to support * unresponded commands for such cases only. 
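 *
 * Illustrative example (not taken from this file): a vendor command
 * such as opcode 0xfc01 has OGF 0x3f and may be handed straight to the
 * driver here, while a standard command like HCI_OP_RESET has to go
 * through hci_send_cmd()/hci_cmd_sync so its completion event is
 * tracked.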
*/ bt_dev_err(hdev, "unresponded command not supported"); return -EINVAL; } skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); return -ENOMEM; } hci_send_frame(hdev, skb); return 0; } EXPORT_SYMBOL(__hci_cmd_send); /* Get data from the previously sent command */ static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode) { struct hci_command_hdr *hdr; if (!skb || skb->len < HCI_COMMAND_HDR_SIZE) return NULL; hdr = (void *)skb->data; if (hdr->opcode != cpu_to_le16(opcode)) return NULL; return skb->data + HCI_COMMAND_HDR_SIZE; } /* Get data from the previously sent command */ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) { void *data; /* Check if opcode matches last sent command */ data = hci_cmd_data(hdev->sent_cmd, opcode); if (!data) /* Check if opcode matches last request */ data = hci_cmd_data(hdev->req_skb, opcode); return data; } /* Get data from last received event */ void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) { struct hci_event_hdr *hdr; int offset; if (!hdev->recv_event) return NULL; hdr = (void *)hdev->recv_event->data; offset = sizeof(*hdr); if (hdr->evt != event) { /* In case of LE metaevent check the subevent match */ if (hdr->evt == HCI_EV_LE_META) { struct hci_ev_le_meta *ev; ev = (void *)hdev->recv_event->data + offset; offset += sizeof(*ev); if (ev->subevent == event) goto found; } return NULL; } found: bt_dev_dbg(hdev, "event 0x%2.2x", event); return hdev->recv_event->data + offset; } /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { struct hci_acl_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ACL_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_acl_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, struct sk_buff *skb, __u16 flags) { struct hci_conn *conn = chan->conn; struct hci_dev *hdev = conn->hdev; struct sk_buff *list; skb->len = skb_headlen(skb); skb->data_len = 0; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags); list = skb_shinfo(skb)->frag_list; if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically. We need to use spin_lock_bh * here because of 6LoWPAN links, as there this function is * called from softirq and using normal spin lock could cause * deadlocks. 
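 *
 * While the lock is held the first fragment keeps the caller's start
 * flags, and every fragment taken from frag_list is re-tagged with
 * ACL_CONT, so the queue ends up holding one contiguous, in-order ACL
 * transaction.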
*/ spin_lock_bh(&queue->lock); __skb_queue_tail(queue, skb); flags &= ~ACL_START; flags |= ACL_CONT; do { skb = list; list = list->next; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(queue, skb); } while (list); spin_unlock_bh(&queue->lock); } } void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = chan->conn->hdev; BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); hci_queue_acl(chan, &chan->data_q, skb, flags); queue_work(hdev->workqueue, &hdev->tx_work); } /* Send SCO data */ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct hci_sco_hdr hdr; BT_DBG("%s len %d", hdev->name, skb->len); hdr.handle = cpu_to_le16(conn->handle); hdr.dlen = skb->len; skb_push(skb, HCI_SCO_HDR_SIZE); skb_reset_transport_header(skb); memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } /* Send ISO data */ static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) { struct hci_iso_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ISO_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_iso_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; __u16 flags; skb->len = skb_headlen(skb); skb->data_len = 0; hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; list = skb_shinfo(skb)->frag_list; flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); hci_add_iso_hdr(skb, conn->handle, flags); if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; __skb_queue_tail(queue, skb); do { skb = list; list = list->next; hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END, 0x00); hci_add_iso_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(queue, skb); } while (list); } } void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; BT_DBG("%s len %d", hdev->name, skb->len); hci_queue_iso(conn, &conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) { struct hci_dev *hdev; int cnt, q; if (!conn) { *quote = 0; return; } hdev = conn->hdev; switch (conn->type) { case ACL_LINK: cnt = hdev->acl_cnt; break; case SCO_LINK: case ESCO_LINK: cnt = hdev->sco_cnt; break; case LE_LINK: cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; case ISO_LINK: cnt = hdev->iso_mtu ? hdev->iso_cnt : hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; default: cnt = 0; bt_dev_err(hdev, "unknown link type %d", conn->type); } q = cnt / num; *quote = q ? 
q : 1; } static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; unsigned int num = 0, min = ~0; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. */ rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != type || skb_queue_empty(&c->data_q)) continue; if (c->state != BT_CONNECTED && c->state != BT_CONFIG) continue; num++; if (c->sent < min) { min = c->sent; conn = c; } if (hci_conn_num(hdev, type) == num) break; } rcu_read_unlock(); hci_quote_sent(conn, num, quote); BT_DBG("conn %p quote %d", conn, *quote); return conn; } static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; bt_dev_err(hdev, "link tx timeout"); rcu_read_lock(); /* Kill stalled connections */ list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); /* hci_disconnect might sleep, so, we have to release * the RCU read lock before calling it. */ rcu_read_unlock(); hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); rcu_read_lock(); } } rcu_read_unlock(); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_chan *chan = NULL; unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; int conn_num = 0; BT_DBG("%s", hdev->name); rcu_read_lock(); list_for_each_entry_rcu(conn, &h->list, list) { struct hci_chan *tmp; if (conn->type != type) continue; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) continue; conn_num++; list_for_each_entry_rcu(tmp, &conn->chan_list, list) { struct sk_buff *skb; if (skb_queue_empty(&tmp->data_q)) continue; skb = skb_peek(&tmp->data_q); if (skb->priority < cur_prio) continue; if (skb->priority > cur_prio) { num = 0; min = ~0; cur_prio = skb->priority; } num++; if (conn->sent < min) { min = conn->sent; chan = tmp; } } if (hci_conn_num(hdev, type) == conn_num) break; } rcu_read_unlock(); if (!chan) return NULL; hci_quote_sent(chan->conn, num, quote); BT_DBG("chan %p quote %d", chan, *quote); return chan; } static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn; int num = 0; BT_DBG("%s", hdev->name); rcu_read_lock(); list_for_each_entry_rcu(conn, &h->list, list) { struct hci_chan *chan; if (conn->type != type) continue; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) continue; num++; list_for_each_entry_rcu(chan, &conn->chan_list, list) { struct sk_buff *skb; if (chan->sent) { chan->sent = 0; continue; } if (skb_queue_empty(&chan->data_q)) continue; skb = skb_peek(&chan->data_q); if (skb->priority >= HCI_PRIO_MAX - 1) continue; skb->priority = HCI_PRIO_MAX - 1; BT_DBG("chan %p skb %p promoted to %d", chan, skb, skb->priority); } if (hci_conn_num(hdev, type) == num) break; } rcu_read_unlock(); } static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { unsigned long last_tx; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return; switch (type) { case LE_LINK: last_tx = hdev->le_last_tx; break; default: last_tx = hdev->acl_last_tx; break; } /* tx timeout must be longer than maximum link supervision timeout * (40.9 seconds) */ if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) hci_link_tx_to(hdev, type); } /* Schedule SCO */ static void 
hci_sched_sco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, SCO_LINK)) return; while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_sched_esco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, ESCO_LINK)) return; while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_sched_acl_pkt(struct hci_dev *hdev) { unsigned int cnt = hdev->acl_cnt; struct hci_chan *chan; struct sk_buff *skb; int quote; __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); hci_send_frame(hdev, skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ hci_sched_sco(hdev); hci_sched_esco(hdev); } } if (cnt != hdev->acl_cnt) hci_prio_recalculate(hdev, ACL_LINK); } static void hci_sched_acl(struct hci_dev *hdev) { BT_DBG("%s", hdev->name); /* No ACL link over BR/EDR controller */ if (!hci_conn_num(hdev, ACL_LINK)) return; hci_sched_acl_pkt(hdev); } static void hci_sched_le(struct hci_dev *hdev) { struct hci_chan *chan; struct sk_buff *skb; int quote, *cnt, tmp; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, LE_LINK)) return; cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt; __check_timeout(hdev, *cnt, LE_LINK); tmp = *cnt; while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); hci_send_frame(hdev, skb); hdev->le_last_tx = jiffies; (*cnt)--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ hci_sched_sco(hdev); hci_sched_esco(hdev); } } if (*cnt != tmp) hci_prio_recalculate(hdev, LE_LINK); } /* Schedule CIS */ static void hci_sched_iso(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote, *cnt; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, ISO_LINK)) return; cnt = hdev->iso_pkts ? &hdev->iso_cnt : hdev->le_pkts ? 
&hdev->le_cnt : &hdev->acl_cnt; while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; (*cnt)--; } } } static void hci_tx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); struct sk_buff *skb; BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ hci_sched_sco(hdev); hci_sched_esco(hdev); hci_sched_iso(hdev); hci_sched_acl(hdev); hci_sched_le(hdev); } /* Send next queued raw (unknown type) packet */ while ((skb = skb_dequeue(&hdev->raw_q))) hci_send_frame(hdev, skb); } /* ----- HCI RX task (incoming data processing) ----- */ /* ACL data packet */ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_acl_hdr *hdr; struct hci_conn *conn; __u16 handle, flags; hdr = skb_pull_data(skb, sizeof(*hdr)); if (!hdr) { bt_dev_err(hdev, "ACL packet too small"); goto drop; } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, handle, flags); hdev->stat.acl_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); /* Send to upper protocol */ l2cap_recv_acldata(conn, skb, flags); return; } else { bt_dev_err(hdev, "ACL packet for unknown connection handle %d", handle); } drop: kfree_skb(skb); } /* SCO data packet */ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr; struct hci_conn *conn; __u16 handle, flags; hdr = skb_pull_data(skb, sizeof(*hdr)); if (!hdr) { bt_dev_err(hdev, "SCO packet too small"); goto drop; } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, handle, flags); hdev->stat.sco_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { /* Send to upper protocol */ hci_skb_pkt_status(skb) = flags & 0x03; sco_recv_scodata(conn, skb); return; } else { bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", handle); } drop: kfree_skb(skb); } static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_iso_hdr *hdr; struct hci_conn *conn; __u16 handle, flags; hdr = skb_pull_data(skb, sizeof(*hdr)); if (!hdr) { bt_dev_err(hdev, "ISO packet too small"); goto drop; } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, handle, flags); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (!conn) { bt_dev_err(hdev, "ISO packet for unknown connection handle %d", handle); goto drop; } /* Send to upper protocol */ iso_recv(conn, skb, flags); return; drop: kfree_skb(skb); } static bool hci_req_is_complete(struct hci_dev *hdev) { struct sk_buff *skb; skb = skb_peek(&hdev->cmd_q); if (!skb) return true; return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); } static void hci_resend_last(struct hci_dev *hdev) { struct hci_command_hdr *sent; struct sk_buff *skb; u16 opcode; if 
(!hdev->sent_cmd) return; sent = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(sent->opcode); if (opcode == HCI_OP_RESET) return; skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); if (!skb) return; skb_queue_head(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { struct sk_buff *skb; unsigned long flags; BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); /* If the completed command doesn't match the last one that was * sent we need to do special handling of it. */ if (!hci_sent_cmd_data(hdev, opcode)) { /* Some CSR based controllers generate a spontaneous * reset complete event during init and any pending * command will never be completed. In such a case we * need to resend whatever was the last sent * command. */ if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) hci_resend_last(hdev); return; } /* If we reach this point this event matches the last command sent */ hci_dev_clear_flag(hdev, HCI_CMD_PENDING); /* If the command succeeded and there's still more commands in * this request the request is not yet complete. */ if (!status && !hci_req_is_complete(hdev)) return; skb = hdev->req_skb; /* If this was the last command in a request the complete * callback would be found in hdev->req_skb instead of the * command queue (hdev->cmd_q). */ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) { *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; return; } if (skb && bt_cb(skb)->hci.req_complete) { *req_complete = bt_cb(skb)->hci.req_complete; return; } /* Remove all pending commands belonging to this request */ spin_lock_irqsave(&hdev->cmd_q.lock, flags); while ((skb = __skb_dequeue(&hdev->cmd_q))) { if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { __skb_queue_head(&hdev->cmd_q, skb); break; } if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } static void hci_rx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); struct sk_buff *skb; BT_DBG("%s", hdev->name); /* The kcov_remote functions used for collecting packet parsing * coverage information from this background thread and associate * the coverage with the syscall's thread which originally injected * the packet. This helps fuzzing the kernel. */ for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { kcov_remote_start_common(skb_get_kcov_handle(skb)); /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } /* If the device has been opened in HCI_USER_CHANNEL, * the userspace has exclusive access to device. * When device is HCI_INIT, we still need to process * the data packets to the driver in order * to complete its setup(). */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !test_bit(HCI_INIT, &hdev->flags)) { kfree_skb(skb); continue; } if (test_bit(HCI_INIT, &hdev->flags)) { /* Don't process data packets in this states. 
*/ switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: kfree_skb(skb); continue; } } /* Process frame */ switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: BT_DBG("%s Event packet", hdev->name); hci_event_packet(hdev, skb); break; case HCI_ACLDATA_PKT: BT_DBG("%s ACL data packet", hdev->name); hci_acldata_packet(hdev, skb); break; case HCI_SCODATA_PKT: BT_DBG("%s SCO data packet", hdev->name); hci_scodata_packet(hdev, skb); break; case HCI_ISODATA_PKT: BT_DBG("%s ISO data packet", hdev->name); hci_isodata_packet(hdev, skb); break; default: kfree_skb(skb); break; } } } static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) { int err; bt_dev_dbg(hdev, "skb %p", skb); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); if (!hdev->sent_cmd) { skb_queue_head(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); return; } err = hci_send_frame(hdev, skb); if (err < 0) { hci_cmd_sync_cancel_sync(hdev, -err); return; } if (hdev->req_status == HCI_REQ_PEND && !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { kfree_skb(hdev->req_skb); hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); } atomic_dec(&hdev->cmd_cnt); } static void hci_cmd_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); struct sk_buff *skb; BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); /* Send queued commands */ if (atomic_read(&hdev->cmd_cnt)) { skb = skb_dequeue(&hdev->cmd_q); if (!skb) return; hci_send_cmd_sync(hdev, skb); rcu_read_lock(); if (test_bit(HCI_RESET, &hdev->flags) || hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) cancel_delayed_work(&hdev->cmd_timer); else queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, HCI_CMD_TIMEOUT); rcu_read_unlock(); } }
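The OGF 0x3f check in __hci_cmd_send() above means that helper is reserved for vendor-specific commands which never generate a completion event. A minimal sketch of how a vendor driver might use it; the OCF 0x001 opcode and the two-byte payload are hypothetical, not taken from any real controller:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical vendor command: OGF 0x3f (vendor-specific), OCF 0x001 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
	u8 param[2] = { 0x01, 0x00 };	/* placeholder payload */

	/* hci_opcode_pack(0x3f, 0x001) builds opcode 0xfc01 */
	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x001),
			      sizeof(param), param);
}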
// SPDX-License-Identifier: GPL-2.0+ /* * Special Initializers for certain USB Mass Storage devices * * Current development and maintenance by: * (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) * * This driver is based on the 'USB Mass Storage Class' document. This * describes in detail the protocol used to communicate with such * devices. Clearly, the designers had SCSI and ATAPI commands in * mind when they created this document. The commands are all very * similar to commands in the SCSI-II and ATAPI specifications. * * It is important to note that in a number of cases this class * exhibits class-specific exemptions from the USB specification. * Notably the usage of NAK, STALL and ACK differs from the norm, in * that they are used to communicate wait, failed and OK on commands. * * Also, for certain devices, the interrupt endpoint is used to convey * status of a command. */ #include <linux/errno.h> #include "usb.h" #include "initializers.h" #include "debug.h" #include "transport.h" /* * This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target * mode */ int usb_stor_euscsi_init(struct us_data *us) { int result; usb_stor_dbg(us, "Attempting to init eUSCSI bridge...\n"); result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, 0x01, 0x0, NULL, 0x0, 5 * HZ); usb_stor_dbg(us, "-- result is %d\n", result); return 0; } /* * This function is required to activate all four slots on the UCR-61S2B * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap*) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf; int res; unsigned int partial; static char init_string[] = "\xec\x0a\x06\x00$PCCHIPS"; usb_stor_dbg(us, "Sending UCR-61S2B initialization packet...\n"); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->Tag = 0; bcb->DataTransferLength = cpu_to_le32(0); bcb->Flags = bcb->Lun = 0; bcb->Length = sizeof(init_string) - 1; memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, init_string, sizeof(init_string) - 1); res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, &partial); if (res) return -EIO; usb_stor_dbg(us, "Getting status packet...\n"); res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &partial); if (res) return -EIO; return 0; } /* This places the HUAWEI E220 devices in multi-port mode */ int usb_stor_huawei_e220_init(struct us_data *us) { int result; result = usb_stor_control_msg(us, us->send_ctrl_pipe, USB_REQ_SET_FEATURE, USB_TYPE_STANDARD | USB_RECIP_DEVICE, 0x01, 0x0, NULL, 0x0, 1 * HZ); usb_stor_dbg(us, "Huawei mode set result is %d\n", result); return 0; }
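These initializers are not called directly; the usb-storage core invokes them via the unusual-devices table. A sketch of how an entry might wire usb_stor_huawei_e220_init() in; the vendor/product IDs, bcdDevice range, and strings below are placeholders, not the real entry:

/* Placeholder entry in the style of drivers/usb/storage/unusual_devs.h */
UNUSUAL_DEV(0x1234, 0x5678, 0x0000, 0x9999,
		"Example Vendor",
		"Example Modem",
		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
		0),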
/* SPDX-License-Identifier: GPL-2.0 */ /* * Compact binary representation of ihex records. Some devices need their * firmware loaded in strange orders rather than a single big blob, but * actually parsing ihex-as-text within the kernel seems silly. Thus,... */ #ifndef __LINUX_IHEX_H__ #define __LINUX_IHEX_H__ #include <linux/types.h> #include <linux/firmware.h> #include <linux/device.h> /* Intel HEX files actually limit the length to 256 bytes, but we have drivers which would benefit from using separate records which are longer than that, so we extend to 16 bits of length */ struct ihex_binrec { __be32 addr; __be16 len; uint8_t data[]; } __attribute__((packed)); static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) { return be16_to_cpu(p->len) + sizeof(*p); } /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * __ihex_next_binrec(const struct ihex_binrec *rec) { const void *p = rec; return p + ALIGN(ihex_binrec_size(rec), 4); } static inline const struct ihex_binrec * ihex_next_binrec(const struct ihex_binrec *rec) { rec = __ihex_next_binrec(rec); return be16_to_cpu(rec->len) ? rec : NULL; } /* Check that ihex_next_binrec() won't take us off the end of the image... */ static inline int ihex_validate_fw(const struct firmware *fw) { const struct ihex_binrec *end, *rec; rec = (const void *)fw->data; end = (const void *)&fw->data[fw->size - sizeof(*end)]; for (; rec <= end; rec = __ihex_next_binrec(rec)) { /* Zero length marks end of records */ if (rec == end && !be16_to_cpu(rec->len)) return 0; } return -EINVAL; } /* Request firmware and validate it so that we can trust we won't * run off the end while reading records... */ static inline int request_ihex_firmware(const struct firmware **fw, const char *fw_name, struct device *dev) { const struct firmware *lfw; int ret; ret = request_firmware(&lfw, fw_name, dev); if (ret) return ret; ret = ihex_validate_fw(lfw); if (ret) { dev_err(dev, "Firmware \"%s\" not valid IHEX records\n", fw_name); release_firmware(lfw); return ret; } *fw = lfw; return 0; } #endif /* __LINUX_IHEX_H__ */
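A minimal consumer sketch, assuming a hypothetical driver and firmware name ("example-fw.ihex"): request a validated image, then walk its records until ihex_next_binrec() returns NULL at the zero-length terminator.

#include <linux/ihex.h>

static int example_load_ihex(struct device *dev)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int ret;

	ret = request_ihex_firmware(&fw, "example-fw.ihex", dev);
	if (ret)
		return ret;

	for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
		/* addr and len are stored big-endian in the binrec header */
		dev_dbg(dev, "record at %#x, %u bytes\n",
			be32_to_cpu(rec->addr), be16_to_cpu(rec->len));
	}

	release_firmware(fw);
	return 0;
}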
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* cx231xx.h - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver */ #ifndef _CX231XX_H #define _CX231XX_H #include <linux/videodev2.h> #include <linux/types.h> #include <linux/ioctl.h> #include <linux/i2c.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/usb.h> #include <media/drv-intf/cx2341x.h> #include <media/videobuf2-vmalloc.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fh.h> #include <media/rc-core.h> #include <media/i2c/ir-kbd-i2c.h> #include "cx231xx-reg.h" #include "cx231xx-pcb-cfg.h" #include "cx231xx-conf-reg.h" #define DRIVER_NAME "cx231xx" #define PWR_SLEEP_INTERVAL 10 /* I2C addresses for control block in Cx231xx */ #define AFE_DEVICE_ADDRESS 0x60 #define I2S_BLK_DEVICE_ADDRESS 0x98 #define VID_BLK_I2C_ADDRESS 0x88 #define VERVE_I2C_ADDRESS 0x40 #define DIF_USE_BASEBAND 0xFFFFFFFF /* Boards supported by driver */ #define CX231XX_BOARD_UNKNOWN 0 #define CX231XX_BOARD_CNXT_CARRAERA 1 #define CX231XX_BOARD_CNXT_SHELBY 2 #define CX231XX_BOARD_CNXT_RDE_253S 3 #define CX231XX_BOARD_CNXT_RDU_253S 4 #define CX231XX_BOARD_CNXT_VIDEO_GRABBER 5 #define CX231XX_BOARD_CNXT_RDE_250 6 #define CX231XX_BOARD_CNXT_RDU_250 7 #define CX231XX_BOARD_HAUPPAUGE_EXETER 8 #define CX231XX_BOARD_HAUPPAUGE_USBLIVE2 9 #define CX231XX_BOARD_PV_PLAYTV_USB_HYBRID 10 #define CX231XX_BOARD_PV_XCAPTURE_USB 11 #define CX231XX_BOARD_KWORLD_UB430_USB_HYBRID 12 #define CX231XX_BOARD_ICONBIT_U100 13 #define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14 #define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15 #define CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2 16 #define CX231XX_BOARD_OTG102 17 #define CX231XX_BOARD_KWORLD_UB445_USB_HYBRID 18 #define CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx 19 #define CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx 20 #define CX231XX_BOARD_HAUPPAUGE_955Q 21 #define CX231XX_BOARD_TERRATEC_GRABBY 22 #define CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD 23 #define CX231XX_BOARD_ASTROMETA_T2HYBRID 24 #define CX231XX_BOARD_THE_IMAGING_SOURCE_DFG_USB2_PRO 25 #define CX231XX_BOARD_HAUPPAUGE_935C 26 #define CX231XX_BOARD_HAUPPAUGE_975 27 /* Limits minimum and default number of buffers */ #define CX231XX_MIN_BUF 4 #define CX231XX_DEF_BUF 12 #define CX231XX_DEF_VBI_BUF 6 #define VBI_LINE_COUNT 17 #define VBI_LINE_LENGTH 1440 /*Limits the max URB message size */ #define URB_MAX_CTRL_SIZE 80 /* Params for validated field */ #define CX231XX_BOARD_NOT_VALIDATED 1 #define CX231XX_BOARD_VALIDATED 0 /* maximum number of cx231xx boards */ #define CX231XX_MAXBOARDS 8 /* maximum number of frames that can be queued */ #define CX231XX_NUM_FRAMES 5 /* number of buffers for isoc transfers */ #define CX231XX_NUM_BUFS 8 /* number of packets for each buffer windows requests only 40 packets .. so we better do the same this is what I found out for all alternate numbers there! 
*/ #define CX231XX_NUM_PACKETS 40 /* default alternate; 0 means choose the best */ #define CX231XX_PINOUT 0 #define CX231XX_INTERLACED_DEFAULT 1 /* time to wait when stopping the isoc transfer */ #define CX231XX_URB_TIMEOUT \ msecs_to_jiffies(CX231XX_NUM_BUFS * CX231XX_NUM_PACKETS) #define CX231xx_NORMS (\ V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443 | \ V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \ V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \ V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK) #define SLEEP_S5H1432 30 #define CX23417_OSC_EN 8 #define CX23417_RESET 9 #define EP5_BUF_SIZE 4096 #define EP5_TIMEOUT_MS 2000 struct cx23417_fmt { u32 fourcc; /* v4l2 format id */ int depth; int flags; u32 cxformat; }; enum cx231xx_mode { CX231XX_SUSPEND, CX231XX_ANALOG_MODE, CX231XX_DIGITAL_MODE, }; enum cx231xx_std_mode { CX231XX_TV_AIR = 0, CX231XX_TV_CABLE }; enum cx231xx_stream_state { STREAM_OFF, STREAM_INTERRUPT, STREAM_ON, }; struct cx231xx; struct cx231xx_isoc_ctl { /* max packet size of isoc transaction */ int max_pkt_size; /* number of allocated urbs */ int num_bufs; /* urb for isoc transfers */ struct urb **urb; /* transfer buffers for isoc transfer */ char **transfer_buffer; /* Last buffer command and region */ u8 cmd; int pos, size, pktsize; /* Last field: ODD or EVEN? */ int field; /* Stores incomplete commands */ u32 tmp_buf; int tmp_buf_len; /* Stores already requested buffers */ struct cx231xx_buffer *buf; /* Stores the number of received fields */ int nfields; /* isoc urb callback */ int (*isoc_copy) (struct cx231xx *dev, struct urb *urb); }; struct cx231xx_bulk_ctl { /* max packet size of bulk transaction */ int max_pkt_size; /* number of allocated urbs */ int num_bufs; /* urb for bulk transfers */ struct urb **urb; /* transfer buffers for bulk transfer */ char **transfer_buffer; /* Last buffer command and region */ u8 cmd; int pos, size, pktsize; /* Last field: ODD or EVEN? 
*/ int field; /* Stores incomplete commands */ u32 tmp_buf; int tmp_buf_len; /* Stores already requested buffers */ struct cx231xx_buffer *buf; /* Stores the number of received fields */ int nfields; /* bulk urb callback */ int (*bulk_copy) (struct cx231xx *dev, struct urb *urb); }; struct cx231xx_fmt { char *name; u32 fourcc; /* v4l2 format id */ int depth; int reg; }; /* buffer for one video frame */ struct cx231xx_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_v4l2_buffer vb; struct list_head list; struct list_head frame; int top_field; int receiving; }; enum ps_package_head { CX231XX_NEED_ADD_PS_PACKAGE_HEAD = 0, CX231XX_NONEED_PS_PACKAGE_HEAD }; struct cx231xx_dmaqueue { struct list_head active; wait_queue_head_t wq; /* Counters to control buffer fill */ int pos; u8 is_partial_line; u8 partial_buf[8]; u8 last_sav; int current_field; u32 bytes_left_in_line; u32 lines_completed; u8 field1_done; u32 lines_per_field; u32 sequence; /*Mpeg2 control buffer*/ u8 *p_left_data; u32 left_data_count; u8 mpeg_buffer_done; u32 mpeg_buffer_completed; enum ps_package_head add_ps_package_head; char ps_head[10]; }; /* inputs */ #define MAX_CX231XX_INPUT 4 enum cx231xx_itype { CX231XX_VMUX_COMPOSITE1 = 1, CX231XX_VMUX_SVIDEO, CX231XX_VMUX_TELEVISION, CX231XX_VMUX_CABLE, CX231XX_RADIO, CX231XX_VMUX_DVB, }; enum cx231xx_v_input { CX231XX_VIN_1_1 = 0x1, CX231XX_VIN_2_1, CX231XX_VIN_3_1, CX231XX_VIN_4_1, CX231XX_VIN_1_2 = 0x01, CX231XX_VIN_2_2, CX231XX_VIN_3_2, CX231XX_VIN_1_3 = 0x1, CX231XX_VIN_2_3, CX231XX_VIN_3_3, }; /* cx231xx has two audio inputs: tuner and line in */ enum cx231xx_amux { /* This is the only entry for cx231xx tuner input */ CX231XX_AMUX_VIDEO, /* cx231xx tuner */ CX231XX_AMUX_LINE_IN, /* Line In */ }; struct cx231xx_reg_seq { unsigned char bit; unsigned char val; int sleep; }; struct cx231xx_input { enum cx231xx_itype type; unsigned int vmux; enum cx231xx_amux amux; struct cx231xx_reg_seq *gpio; }; #define INPUT(nr) (&cx231xx_boards[dev->model].input[nr]) enum cx231xx_decoder { CX231XX_NODECODER, CX231XX_AVDECODER }; enum CX231XX_I2C_MASTER_PORT { I2C_0 = 0, /* master 0 - internal connection */ I2C_1 = 1, /* master 1 - used with mux */ I2C_2 = 2, /* master 2 */ I2C_1_MUX_1 = 3, /* master 1 - port 1 (I2C_DEMOD_EN = 0) */ I2C_1_MUX_3 = 4 /* master 1 - port 3 (I2C_DEMOD_EN = 1) */ }; struct cx231xx_board { char *name; int vchannels; int tuner_type; int tuner_addr; v4l2_std_id norm; /* tv norm */ /* demod related */ int demod_addr; int demod_addr2; u8 demod_xfer_mode; /* 0 - Serial; 1 - parallel */ /* GPIO Pins */ struct cx231xx_reg_seq *dvb_gpio; struct cx231xx_reg_seq *suspend_gpio; struct cx231xx_reg_seq *tuner_gpio; /* Negative means don't use it */ s8 tuner_sif_gpio; s8 tuner_scl_gpio; s8 tuner_sda_gpio; /* PIN ctrl */ u32 ctl_pin_status_mask; u8 agc_analog_digital_select_gpio; u32 gpio_pin_status_mask; /* i2c masters */ u8 tuner_i2c_master; u8 demod_i2c_master; u8 ir_i2c_master; /* for devices with I2C chips for IR */ char *rc_map_name; unsigned int max_range_640_480:1; unsigned int has_dvb:1; unsigned int has_417:1; unsigned int valid:1; unsigned int no_alt_vanc:1; unsigned int external_av:1; unsigned char xclk, i2c_speed; enum cx231xx_decoder decoder; int output_mode; struct cx231xx_input input[MAX_CX231XX_INPUT]; struct cx231xx_input radio; struct rc_map *ir_codes; }; /* device states */ enum cx231xx_dev_state { DEV_INITIALIZED = 0x01, DEV_DISCONNECTED = 0x02, }; enum AFE_MODE { AFE_MODE_LOW_IF, AFE_MODE_BASEBAND, AFE_MODE_EU_HI_IF, AFE_MODE_US_HI_IF, 
AFE_MODE_JAPAN_HI_IF }; enum AUDIO_INPUT { AUDIO_INPUT_MUTE, AUDIO_INPUT_LINE, AUDIO_INPUT_TUNER_TV, AUDIO_INPUT_SPDIF, AUDIO_INPUT_TUNER_FM }; #define CX231XX_AUDIO_BUFS 5 #define CX231XX_NUM_AUDIO_PACKETS 16 #define CX231XX_ISO_NUM_AUDIO_PACKETS 64 /* cx231xx extensions */ #define CX231XX_AUDIO 0x10 #define CX231XX_DVB 0x20 struct cx231xx_audio { char name[50]; char *transfer_buffer[CX231XX_AUDIO_BUFS]; struct urb *urb[CX231XX_AUDIO_BUFS]; struct usb_device *udev; unsigned int capture_transfer_done; struct snd_pcm_substream *capture_pcm_substream; unsigned int hwptr_done_capture; struct snd_card *sndcard; int users, shutdown; /* locks */ spinlock_t slock; int alt; /* alternate */ int max_pkt_size; /* max packet size of isoc transaction */ int num_alt; /* Number of alternative settings */ unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */ u16 end_point_addr; }; /*****************************************************************/ /* set/get i2c */ /* 00--1Mb/s, 01-400kb/s, 10--100kb/s, 11--5Mb/s */ #define I2C_SPEED_1M 0x0 #define I2C_SPEED_400K 0x1 #define I2C_SPEED_100K 0x2 #define I2C_SPEED_5M 0x3 /* 0-- STOP transaction */ #define I2C_STOP 0x0 /* 1-- do not transmit STOP at end of transaction */ #define I2C_NOSTOP 0x1 /* 1--allow slave to insert clock wait states */ #define I2C_SYNC 0x1 struct cx231xx_i2c { struct cx231xx *dev; int nr; /* i2c i/o */ struct i2c_adapter i2c_adap; int i2c_rc; /* different settings for each bus */ u8 i2c_period; u8 i2c_nostop; u8 i2c_reserve; }; struct cx231xx_i2c_xfer_data { u8 dev_addr; u8 direction; /* 1 - IN, 0 - OUT */ u8 saddr_len; /* sub address len */ u16 saddr_dat; /* sub addr data */ u8 buf_size; /* buffer size */ u8 *p_buffer; /* pointer to the buffer */ }; struct VENDOR_REQUEST_IN { u8 bRequest; u16 wValue; u16 wIndex; u16 wLength; u8 direction; u8 bData; u8 *pBuff; }; struct cx231xx_tvnorm { char *name; v4l2_std_id id; u32 cxiformat; u32 cxoformat; }; enum TRANSFER_TYPE { Raw_Video = 0, Audio, Vbi, /* VANC */ Sliced_cc, /* HANC */ TS1_serial_mode, TS2, TS1_parallel_mode } ; struct cx231xx_video_mode { /* Isoc control struct */ struct cx231xx_dmaqueue vidq; struct cx231xx_isoc_ctl isoc_ctl; struct cx231xx_bulk_ctl bulk_ctl; /* locks */ spinlock_t slock; /* usb transfer */ int alt; /* alternate */ int max_pkt_size; /* max packet size of isoc transaction */ int num_alt; /* Number of alternative settings */ unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */ u16 end_point_addr; }; struct cx231xx_tsport { struct cx231xx *dev; int nr; int sram_chno; /* dma queues */ u32 ts_packet_size; u32 ts_packet_count; int width; int height; /* locks */ spinlock_t slock; /* registers */ u32 reg_gpcnt; u32 reg_gpcnt_ctl; u32 reg_dma_ctl; u32 reg_lngth; u32 reg_hw_sop_ctrl; u32 reg_gen_ctrl; u32 reg_bd_pkt_status; u32 reg_sop_status; u32 reg_fifo_ovfl_stat; u32 reg_vld_misc; u32 reg_ts_clk_en; u32 reg_ts_int_msk; u32 reg_ts_int_stat; u32 reg_src_sel; /* Default register vals */ int pci_irqmask; u32 dma_ctl_val; u32 ts_int_msk_val; u32 gen_ctrl_val; u32 ts_clk_en_val; u32 src_sel_val; u32 vld_misc_val; u32 hw_sop_ctrl_val; /* Allow a single tsport to have multiple frontends */ u32 num_frontends; void *port_priv; }; /* main device struct */ struct cx231xx { /* generic device properties */ char name[30]; /* name (including minor) of the device */ int model; /* index in the device_data struct */ int devno; /* marks the number of this device */ struct device *dev; /* pointer to USB interface's dev */ struct cx231xx_board board; /* For I2C IR support 
*/ struct IR_i2c_init_data init_data; struct i2c_client *ir_i2c_client; unsigned int stream_on:1; /* Locks streams */ unsigned int vbi_stream_on:1; /* Locks streams for VBI */ unsigned int has_audio_class:1; unsigned int has_alsa_audio:1; unsigned int i2c_scan_running:1; /* true only during i2c_scan */ struct cx231xx_fmt *format; struct v4l2_device v4l2_dev; struct v4l2_subdev *sd_cx25840; struct v4l2_subdev *sd_tuner; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl_handler radio_ctrl_handler; struct cx2341x_handler mpeg_ctrl_handler; struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */ atomic_t stream_started; /* stream should be running if true */ struct list_head devlist; int tuner_type; /* type of the tuner */ int tuner_addr; /* tuner address */ /* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */ struct cx231xx_i2c i2c_bus[3]; struct i2c_mux_core *muxc; struct i2c_adapter *i2c_mux_adap[2]; unsigned int xc_fw_load_done:1; unsigned int port_3_switch_enabled:1; /* locks */ struct mutex gpio_i2c_lock; struct mutex i2c_lock; /* video for linux */ int users; /* user count for exclusive use */ struct video_device vdev; /* video for linux device struct */ v4l2_std_id norm; /* selected tv norm */ int ctl_freq; /* selected frequency */ unsigned int ctl_ainput; /* selected audio input */ /* frame properties */ int width; /* current frame width */ int height; /* current frame height */ int interlaced; /* 1=interlace fields, 0=just top fields */ unsigned int size; struct cx231xx_audio adev; /* states */ enum cx231xx_dev_state state; struct work_struct request_module_wk; /* locks */ struct mutex lock; struct mutex ctrl_urb_lock; /* protects urb_buf */ struct list_head inqueue, outqueue; wait_queue_head_t open, wait_frame, wait_stream; struct video_device vbi_dev; struct video_device radio_dev; #if defined(CONFIG_MEDIA_CONTROLLER) struct media_device *media_dev; struct media_pad video_pad, vbi_pad; struct media_entity input_ent[MAX_CX231XX_INPUT]; struct media_pad input_pad[MAX_CX231XX_INPUT]; #endif struct vb2_queue vidq; struct vb2_queue vbiq; unsigned char eedata[256]; struct cx231xx_video_mode video_mode; struct cx231xx_video_mode vbi_mode; struct cx231xx_video_mode sliced_cc_mode; struct cx231xx_video_mode ts1_mode; atomic_t devlist_count; struct usb_device *udev; /* the usb device */ char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */ /* helper funcs that call usb_control_msg */ int (*cx231xx_read_ctrl_reg) (struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); int (*cx231xx_write_ctrl_reg) (struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); int (*cx231xx_send_usb_command) (struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data); int (*cx231xx_gpio_i2c_read) (struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); int (*cx231xx_gpio_i2c_write) (struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); int (*cx231xx_set_analog_freq) (struct cx231xx *dev, u32 freq); int (*cx231xx_reset_analog_tuner) (struct cx231xx *dev); enum cx231xx_mode mode; struct cx231xx_dvb *dvb; /* Cx231xx supported PCB config's */ struct pcb_config current_pcb_config; u8 current_scenario_idx; u8 interface_count; u8 max_iad_interface_count; /* GPIO related register direction and values */ u32 gpio_dir; u32 gpio_val; /* Power Modes */ int power_mode; /* afe parameters */ enum AFE_MODE afe_mode; u32 afe_ref_count; /* video related parameters */ u32 video_input; u32 active_mode; u8 vbi_or_sliced_cc_mode; /* 0 - vbi ; 1 - sliced cc mode */ enum 
cx231xx_std_mode std_mode; /* 0 - Air; 1 - cable */ /*mode: digital=1 or analog=0*/ u8 mode_tv; u8 USE_ISO; struct cx231xx_tvnorm encodernorm; struct cx231xx_tsport ts1, ts2; struct vb2_queue mpegq; struct video_device v4l_device; atomic_t v4l_reader_count; u32 freq; unsigned int input; u32 cx23417_mailbox; u32 __iomem *lmmio; u8 __iomem *bmmio; }; extern struct list_head cx231xx_devlist; #define cx25840_call(cx231xx, o, f, args...) \ v4l2_subdev_call(cx231xx->sd_cx25840, o, f, ##args) #define tuner_call(cx231xx, o, f, args...) \ v4l2_subdev_call(cx231xx->sd_tuner, o, f, ##args) #define call_all(dev, o, f, args...) \ v4l2_device_call_until_err(&dev->v4l2_dev, 0, o, f, ##args) struct cx231xx_ops { struct list_head next; char *name; int id; int (*init) (struct cx231xx *); int (*fini) (struct cx231xx *); }; /* call back functions in dvb module */ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq); int cx231xx_reset_analog_tuner(struct cx231xx *dev); /* Provided by cx231xx-i2c.c */ void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port); int cx231xx_i2c_register(struct cx231xx_i2c *bus); void cx231xx_i2c_unregister(struct cx231xx_i2c *bus); int cx231xx_i2c_mux_create(struct cx231xx *dev); int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no); void cx231xx_i2c_mux_unregister(struct cx231xx *dev); struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port); /* Internal block control functions */ int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len, int master); int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len, int master); int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len); int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len); int cx231xx_reg_mask_write(struct cx231xx *dev, u8 dev_addr, u8 size, u16 register_address, u8 bit_start, u8 bit_end, u32 value); int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr, u16 saddr, u32 mask, u32 value); u32 cx231xx_set_field(u32 field_mask, u32 data); /*verve r/w*/ void initGPIO(struct cx231xx *dev); void uninitGPIO(struct cx231xx *dev); /* afe related functions */ int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count); int cx231xx_afe_init_channels(struct cx231xx *dev); int cx231xx_afe_setup_AFE_for_baseband(struct cx231xx *dev); int cx231xx_afe_set_input_mux(struct cx231xx *dev, u32 input_mux); int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode); int cx231xx_afe_update_power_control(struct cx231xx *dev, enum AV_MODE avmode); int cx231xx_afe_adjust_ref_count(struct cx231xx *dev, u32 video_input); /* i2s block related functions */ int cx231xx_i2s_blk_initialize(struct cx231xx *dev); int cx231xx_i2s_blk_update_power_control(struct cx231xx *dev, enum AV_MODE avmode); int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input); /* DIF related functions */ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode, u32 function_mode, u32 standard); void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode); u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd); void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode); void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev); void reset_s5h1432_demod(struct cx231xx *dev); void 
update_HH_register_after_set_DIF(struct cx231xx *dev); int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard); int cx231xx_tuner_pre_channel_change(struct cx231xx *dev); int cx231xx_tuner_post_channel_change(struct cx231xx *dev); /* video parser functions */ u8 cx231xx_find_next_SAV_EAV(u8 *p_buffer, u32 buffer_size, u32 *p_bytes_used); u8 cx231xx_find_boundary_SAV_EAV(u8 *p_buffer, u8 *partial_buf, u32 *p_bytes_used); int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 *p_buffer, u32 bytes_to_copy); void cx231xx_reset_video_buffer(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q); u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q); u32 cx231xx_copy_video_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 *p_line, u32 length, int field_number); u32 cx231xx_get_video_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 sav_eav, u8 *p_buffer, u32 buffer_size); void cx231xx_swab(u16 *from, u16 *to, u16 len); /* Provided by cx231xx-core.c */ u32 cx231xx_request_buffers(struct cx231xx *dev, u32 count); void cx231xx_queue_unusedframes(struct cx231xx *dev); void cx231xx_release_buffers(struct cx231xx *dev); /* read from control pipe */ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); /* write to control pipe */ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode); int cx231xx_send_vendor_cmd(struct cx231xx *dev, struct VENDOR_REQUEST_IN *ven_req); int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data); /* Gpio related functions */ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val, u8 len, u8 request, u8 direction); int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value); int cx231xx_set_gpio_direction(struct cx231xx *dev, int pin_number, int pin_value); int cx231xx_gpio_i2c_start(struct cx231xx *dev); int cx231xx_gpio_i2c_end(struct cx231xx *dev); int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data); int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf); int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev); int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev); int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev); int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); /* audio related functions */ int cx231xx_set_audio_decoder_input(struct cx231xx *dev, enum AUDIO_INPUT audio_input); int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type); int cx231xx_set_video_alternate(struct cx231xx *dev); int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt); int is_fw_load(struct cx231xx *dev); int cx231xx_check_fw(struct cx231xx *dev); int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*isoc_copy) (struct cx231xx *dev, struct urb *urb)); int cx231xx_init_bulk(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*bulk_copy) (struct cx231xx *dev, struct urb *urb)); void cx231xx_stop_TS1(struct cx231xx *dev); void cx231xx_start_TS1(struct cx231xx *dev); void cx231xx_uninit_isoc(struct cx231xx *dev); void cx231xx_uninit_bulk(struct cx231xx *dev); int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode); int cx231xx_unmute_audio(struct cx231xx *dev); int cx231xx_ep5_bulkout(struct 
cx231xx *dev, u8 *firmware, u16 size); void cx231xx_disable656(struct cx231xx *dev); void cx231xx_enable656(struct cx231xx *dev); int cx231xx_demod_reset(struct cx231xx *dev); int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio); /* Device list functions */ void cx231xx_release_resources(struct cx231xx *dev); void cx231xx_release_analog_resources(struct cx231xx *dev); int cx231xx_register_analog_devices(struct cx231xx *dev); void cx231xx_remove_from_devlist(struct cx231xx *dev); void cx231xx_add_into_devlist(struct cx231xx *dev); void cx231xx_init_extension(struct cx231xx *dev); void cx231xx_close_extension(struct cx231xx *dev); /* hardware init functions */ int cx231xx_dev_init(struct cx231xx *dev); void cx231xx_dev_uninit(struct cx231xx *dev); void cx231xx_config_i2c(struct cx231xx *dev); int cx231xx_config(struct cx231xx *dev); /* Stream control functions */ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask); int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask); int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type); /* Power control functions */ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode); /* chip specific control functions */ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev); int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, u8 analog_or_digital); int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3); /* video audio decoder related functions */ void video_mux(struct cx231xx *dev, int index); int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input); int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u32 input); int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev); int cx231xx_set_audio_input(struct cx231xx *dev, u8 input); /* Provided by cx231xx-video.c */ int cx231xx_register_extension(struct cx231xx_ops *dev); void cx231xx_unregister_extension(struct cx231xx_ops *dev); void cx231xx_init_extension(struct cx231xx *dev); void cx231xx_close_extension(struct cx231xx *dev); void cx231xx_v4l2_create_entities(struct cx231xx *dev); int cx231xx_querycap(struct file *file, void *priv, struct v4l2_capability *cap); int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t); int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t); int cx231xx_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f); int cx231xx_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f); int cx231xx_enum_input(struct file *file, void *priv, struct v4l2_input *i); int cx231xx_g_input(struct file *file, void *priv, unsigned int *i); int cx231xx_s_input(struct file *file, void *priv, unsigned int i); int cx231xx_g_chip_info(struct file *file, void *fh, struct v4l2_dbg_chip_info *chip); int cx231xx_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg); int cx231xx_s_register(struct file *file, void *priv, const struct v4l2_dbg_register *reg); /* Provided by cx231xx-cards.c */ extern void cx231xx_pre_card_setup(struct cx231xx *dev); extern void cx231xx_card_setup(struct cx231xx *dev); extern struct cx231xx_board cx231xx_boards[]; extern struct usb_device_id cx231xx_id_table[]; int cx231xx_tuner_callback(void *ptr, int component, int command, int arg); /* cx23885-417.c */ extern int cx231xx_417_register(struct cx231xx *dev); extern void cx231xx_417_unregister(struct cx231xx *dev); /* cx23885-input.c */ #if defined(CONFIG_VIDEO_CX231XX_RC) int cx231xx_ir_init(struct cx231xx *dev); void 
cx231xx_ir_exit(struct cx231xx *dev); #else static inline int cx231xx_ir_init(struct cx231xx *dev) { return 0; } static inline void cx231xx_ir_exit(struct cx231xx *dev) {} #endif static inline unsigned int norm_maxw(struct cx231xx *dev) { if (dev->board.max_range_640_480) return 640; else return 720; } static inline unsigned int norm_maxh(struct cx231xx *dev) { if (dev->board.max_range_640_480) return 480; else return (dev->norm & V4L2_STD_625_50) ? 576 : 480; } #endif
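Sub-modules such as the ALSA and DVB extensions attach to the core through cx231xx_register_extension(), so the core can invoke their init/fini hooks for each probed device. A minimal sketch of a hypothetical extension; the id value and names are illustrative only:

#include <linux/module.h>
#include "cx231xx.h"

static int example_ext_init(struct cx231xx *dev)
{
	dev_info(dev->dev, "example extension bound to %s\n", dev->name);
	return 0;
}

static int example_ext_fini(struct cx231xx *dev)
{
	return 0;
}

static struct cx231xx_ops example_ext_ops = {
	.id = 0x40,			/* illustrative extension bit */
	.name = "cx231xx example extension",
	.init = example_ext_init,
	.fini = example_ext_fini,
};

static int __init example_ext_register(void)
{
	return cx231xx_register_extension(&example_ext_ops);
}

static void __exit example_ext_unregister(void)
{
	cx231xx_unregister_extension(&example_ext_ops);
}

module_init(example_ext_register);
module_exit(example_ext_unregister);
MODULE_LICENSE("GPL");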
// SPDX-License-Identifier: GPL-2.0+
/*
 * Transport & Protocol Driver for In-System Design, Inc.
ISD200 ASIC * * Current development and maintenance: * (C) 2001-2002 Björn Stenberg (bjorn@haxx.se) * * Developed with the assistance of: * (C) 2002 Alan Stern <stern@rowland.org> * * Initial work: * (C) 2000 In-System Design, Inc. (support@in-system.com) * * The ISD200 ASIC does not natively support ATA devices. The chip * does implement an interface, the ATA Command Block (ATACB) which provides * a means of passing ATA commands and ATA register accesses to a device. * * History: * * 2002-10-19: Removed the specialized transfer routines. * (Alan Stern <stern@rowland.harvard.edu>) * 2001-02-24: Removed lots of duplicate code and simplified the structure. * (bjorn@haxx.se) * 2002-01-16: Fixed endianness bug so it works on the ppc arch. * (Luc Saillard <luc@saillard.org>) * 2002-01-17: All bitfields removed. * (bjorn@haxx.se) */ /* Include files */ #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/scatterlist.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-isd200" MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC"); MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); static int isd200_Initialization(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id isd200_usb_ids[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, isd200_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev isd200_unusual_dev_list[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* Timeout defines (in Seconds) */ #define ISD200_ENUM_BSY_TIMEOUT 35 #define ISD200_ENUM_DETECT_TIMEOUT 30 #define ISD200_DEFAULT_TIMEOUT 30 /* device flags */ #define DF_ATA_DEVICE 0x0001 #define DF_MEDIA_STATUS_ENABLED 0x0002 #define DF_REMOVABLE_MEDIA 0x0004 /* capability bit definitions */ #define CAPABILITY_DMA 0x01 #define CAPABILITY_LBA 0x02 /* command_setX bit definitions */ #define COMMANDSET_REMOVABLE 0x02 #define COMMANDSET_MEDIA_STATUS 0x10 /* ATA Vendor Specific defines */ #define ATA_ADDRESS_DEVHEAD_STD 0xa0 #define ATA_ADDRESS_DEVHEAD_LBA_MODE 0x40 #define ATA_ADDRESS_DEVHEAD_SLAVE 0x10 /* Action Select bits */ #define ACTION_SELECT_0 0x01 #define ACTION_SELECT_1 0x02 #define ACTION_SELECT_2 0x04 #define ACTION_SELECT_3 0x08 #define ACTION_SELECT_4 0x10 #define ACTION_SELECT_5 0x20 #define ACTION_SELECT_6 0x40 #define ACTION_SELECT_7 0x80 /* Register Select bits */ #define REG_ALTERNATE_STATUS 0x01 #define REG_DEVICE_CONTROL 0x01 #define REG_ERROR 0x02 #define REG_FEATURES 0x02 #define REG_SECTOR_COUNT 0x04 #define REG_SECTOR_NUMBER 0x08 #define REG_CYLINDER_LOW 0x10 #define REG_CYLINDER_HIGH 0x20 
#define REG_DEVICE_HEAD 0x40 #define REG_STATUS 0x80 #define REG_COMMAND 0x80 /* ATA registers offset definitions */ #define ATA_REG_ERROR_OFFSET 1 #define ATA_REG_LCYL_OFFSET 4 #define ATA_REG_HCYL_OFFSET 5 #define ATA_REG_STATUS_OFFSET 7 /* ATA error definitions not in <linux/hdreg.h> */ #define ATA_ERROR_MEDIA_CHANGE 0x20 /* ATA command definitions not in <linux/hdreg.h> */ #define ATA_COMMAND_GET_MEDIA_STATUS 0xDA #define ATA_COMMAND_MEDIA_EJECT 0xED /* ATA drive control definitions */ #define ATA_DC_DISABLE_INTERRUPTS 0x02 #define ATA_DC_RESET_CONTROLLER 0x04 #define ATA_DC_REENABLE_CONTROLLER 0x00 /* * General purpose return codes */ #define ISD200_ERROR -1 #define ISD200_GOOD 0 /* * Transport return codes */ #define ISD200_TRANSPORT_GOOD 0 /* Transport good, command good */ #define ISD200_TRANSPORT_FAILED 1 /* Transport good, command failed */ #define ISD200_TRANSPORT_ERROR 2 /* Transport bad (i.e. device dead) */ /* driver action codes */ #define ACTION_READ_STATUS 0 #define ACTION_RESET 1 #define ACTION_REENABLE 2 #define ACTION_SOFT_RESET 3 #define ACTION_ENUM 4 #define ACTION_IDENTIFY 5 /* * ata_cdb struct */ union ata_cdb { struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char WriteData3F6; unsigned char WriteData1F1; unsigned char WriteData1F2; unsigned char WriteData1F3; unsigned char WriteData1F4; unsigned char WriteData1F5; unsigned char WriteData1F6; unsigned char WriteData1F7; unsigned char Reserved[3]; } generic; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char AlternateStatusByte; unsigned char ErrorByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char StatusByte; unsigned char Reserved[3]; } read; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char DeviceControlByte; unsigned char FeaturesByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char CommandByte; unsigned char Reserved[3]; } write; }; /* * Inquiry data structure. This is the data returned from the target * after it receives an inquiry. * * This structure may be extended by the number of bytes specified * in the field AdditionalLength. The defined size constant only * includes fields through ProductRevisionLevel. 
*/ /* * DeviceType field */ #define DIRECT_ACCESS_DEVICE 0x00 /* disks */ #define DEVICE_REMOVABLE 0x80 struct inquiry_data { unsigned char DeviceType; unsigned char DeviceTypeModifier; unsigned char Versions; unsigned char Format; unsigned char AdditionalLength; unsigned char Reserved[2]; unsigned char Capability; unsigned char VendorId[8]; unsigned char ProductId[16]; unsigned char ProductRevisionLevel[4]; unsigned char VendorSpecific[20]; unsigned char Reserved3[40]; } __attribute__ ((packed)); /* * INQUIRY data buffer size */ #define INQUIRYDATABUFFERSIZE 36 /* * ISD200 CONFIG data struct */ #define ATACFG_TIMING 0x0f #define ATACFG_ATAPI_RESET 0x10 #define ATACFG_MASTER 0x20 #define ATACFG_BLOCKSIZE 0xa0 #define ATACFGE_LAST_LUN 0x07 #define ATACFGE_DESC_OVERRIDE 0x08 #define ATACFGE_STATE_SUSPEND 0x10 #define ATACFGE_SKIP_BOOT 0x20 #define ATACFGE_CONF_DESC2 0x40 #define ATACFGE_INIT_STATUS 0x80 #define CFG_CAPABILITY_SRST 0x01 struct isd200_config { unsigned char EventNotification; unsigned char ExternalClock; unsigned char ATAInitTimeout; unsigned char ATAConfig; unsigned char ATAMajorCommand; unsigned char ATAMinorCommand; unsigned char ATAExtraConfig; unsigned char Capability; }__attribute__ ((packed)); /* * ISD200 driver information struct */ struct isd200_info { struct inquiry_data InquiryData; u16 *id; struct isd200_config ConfigData; unsigned char *RegsBuf; unsigned char ATARegs[8]; unsigned char DeviceHead; unsigned char DeviceFlags; /* maximum number of LUNs supported */ unsigned char MaxLUNs; unsigned char cmnd[MAX_COMMAND_SIZE]; struct scsi_cmnd srb; struct scatterlist sg; }; /* * Read Capacity Data - returned in Big Endian format */ struct read_capacity_data { __be32 LogicalBlockAddress; __be32 BytesPerBlock; }; /* * Read Block Limits Data - returned in Big Endian format * This structure returns the maximum and minimum block * size for a TAPE device. */ struct read_block_limits { unsigned char Reserved; unsigned char BlockMaximumSize[3]; unsigned char BlockMinimumSize[2]; }; /* * Sense Data Format */ #define SENSE_ERRCODE 0x7f #define SENSE_ERRCODE_VALID 0x80 #define SENSE_FLAG_SENSE_KEY 0x0f #define SENSE_FLAG_BAD_LENGTH 0x20 #define SENSE_FLAG_END_OF_MEDIA 0x40 #define SENSE_FLAG_FILE_MARK 0x80 struct sense_data { unsigned char ErrorCode; unsigned char SegmentNumber; unsigned char Flags; unsigned char Information[4]; unsigned char AdditionalSenseLength; unsigned char CommandSpecificInformation[4]; unsigned char AdditionalSenseCode; unsigned char AdditionalSenseCodeQualifier; unsigned char FieldReplaceableUnitCode; unsigned char SenseKeySpecific[3]; } __attribute__ ((packed)); /* * Default request sense buffer size */ #define SENSE_BUFFER_SIZE 18 /*********************************************************************** * Helper routines ***********************************************************************/ /************************************************************************** * isd200_build_sense * * Builds an artificial sense buffer to report the results of a * failed command. 
* * RETURNS: * void */ static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb) { struct isd200_info *info = (struct isd200_info *)us->extra; struct sense_data *buf = (struct sense_data *) &srb->sense_buffer[0]; unsigned char error = info->ATARegs[ATA_REG_ERROR_OFFSET]; if(error & ATA_ERROR_MEDIA_CHANGE) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_MCR) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_TRK0NF) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = NOT_READY; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_UNC) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = DATA_PROTECT; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else { buf->ErrorCode = 0; buf->AdditionalSenseLength = 0; buf->Flags = 0; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } } /*********************************************************************** * Transport routines ***********************************************************************/ /************************************************************************** * isd200_set_srb(), isd200_srb_set_bufflen() * * Two helpers to facilitate in initialization of scsi_cmnd structure * Will need to change when struct scsi_cmnd changes */ static void isd200_set_srb(struct isd200_info *info, enum dma_data_direction dir, void* buff, unsigned bufflen) { struct scsi_cmnd *srb = &info->srb; if (buff) sg_init_one(&info->sg, buff, bufflen); srb->sc_data_direction = dir; srb->sdb.table.sgl = buff ? &info->sg : NULL; srb->sdb.length = bufflen; srb->sdb.table.nents = buff ? 
1 : 0; } static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen) { srb->sdb.length = bufflen; } /************************************************************************** * isd200_action * * Routine for sending commands to the isd200 * * RETURNS: * ISD status code */ static int isd200_action( struct us_data *us, int action, void* pointer, int value ) { union ata_cdb ata; /* static to prevent this large struct being placed on the valuable stack */ static struct scsi_device srb_dev; struct isd200_info *info = (struct isd200_info *)us->extra; struct scsi_cmnd *srb = &info->srb; int status; memset(&ata, 0, sizeof(ata)); memcpy(srb->cmnd, info->cmnd, MAX_COMMAND_SIZE); srb->device = &srb_dev; ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ata.generic.TransferBlockSize = 1; switch ( action ) { case ACTION_READ_STATUS: usb_stor_dbg(us, " isd200_action(READ_STATUS)\n"); ata.generic.ActionSelect = ACTION_SELECT_0|ACTION_SELECT_2; ata.generic.RegisterSelect = REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_STATUS | REG_ERROR; isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value); break; case ACTION_ENUM: usb_stor_dbg(us, " isd200_action(ENUM,0x%02x)\n", value); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4| ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD; ata.write.DeviceHeadByte = value; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_RESET: usb_stor_dbg(us, " isd200_action(RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_REENABLE: usb_stor_dbg(us, " isd200_action(REENABLE)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_SOFT_RESET: usb_stor_dbg(us, " isd200_action(SOFT_RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND; ata.write.DeviceHeadByte = info->DeviceHead; ata.write.CommandByte = ATA_CMD_DEV_RESET; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_IDENTIFY: usb_stor_dbg(us, " isd200_action(IDENTIFY)\n"); ata.generic.RegisterSelect = REG_COMMAND; ata.write.CommandByte = ATA_CMD_ID_ATA; isd200_set_srb(info, DMA_FROM_DEVICE, info->id, ATA_ID_WORDS * 2); break; default: usb_stor_dbg(us, "Error: Undefined action %d\n", action); return ISD200_ERROR; } memcpy(srb->cmnd, &ata, sizeof(ata.generic)); srb->cmd_len = sizeof(ata.generic); status = usb_stor_Bulk_transport(srb, us); if (status == USB_STOR_TRANSPORT_GOOD) status = ISD200_GOOD; else { usb_stor_dbg(us, " isd200_action(0x%02x) error: %d\n", action, status); status = ISD200_ERROR; /* need to reset device here */ } return status; } /************************************************************************** * isd200_read_regs * * Read ATA Registers * * RETURNS: * ISD status code */ static int isd200_read_regs( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_IssueATAReadRegs\n"); transferStatus = isd200_action( us, ACTION_READ_STATUS, info->RegsBuf, 
sizeof(info->ATARegs) ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error reading ATA registers\n"); retStatus = ISD200_ERROR; } else { memcpy(info->ATARegs, info->RegsBuf, sizeof(info->ATARegs)); usb_stor_dbg(us, " Got ATA Register[ATA_REG_ERROR_OFFSET] = 0x%x\n", info->ATARegs[ATA_REG_ERROR_OFFSET]); } return retStatus; } /************************************************************************** * Invoke the transport and basic error-handling/recovery methods * * This is used by the protocol layers to actually send the message to * the device and receive the response. */ static void isd200_invoke_transport( struct us_data *us, struct scsi_cmnd *srb, union ata_cdb *ataCdb ) { int need_auto_sense = 0; int transferStatus; int result; /* send the command to the transport layer */ memcpy(srb->cmnd, ataCdb, sizeof(ataCdb->generic)); srb->cmd_len = sizeof(ataCdb->generic); transferStatus = usb_stor_Bulk_transport(srb, us); /* * if the command gets aborted by the higher layers, we need to * short-circuit all other processing */ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- command was aborted\n"); goto Handle_Abort; } switch (transferStatus) { case USB_STOR_TRANSPORT_GOOD: /* Indicate a good result */ srb->result = SAM_STAT_GOOD; break; case USB_STOR_TRANSPORT_NO_SENSE: usb_stor_dbg(us, "-- transport indicates protocol failure\n"); srb->result = SAM_STAT_CHECK_CONDITION; return; case USB_STOR_TRANSPORT_FAILED: usb_stor_dbg(us, "-- transport indicates command failure\n"); need_auto_sense = 1; break; case USB_STOR_TRANSPORT_ERROR: usb_stor_dbg(us, "-- transport indicates transport error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; default: usb_stor_dbg(us, "-- transport indicates unknown error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; } if ((scsi_get_resid(srb) > 0) && !((srb->cmnd[0] == REQUEST_SENSE) || (srb->cmnd[0] == INQUIRY) || (srb->cmnd[0] == MODE_SENSE) || (srb->cmnd[0] == LOG_SENSE) || (srb->cmnd[0] == MODE_SENSE_10))) { usb_stor_dbg(us, "-- unexpectedly short transfer\n"); need_auto_sense = 1; } if (need_auto_sense) { result = isd200_read_regs(us); if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- auto-sense aborted\n"); goto Handle_Abort; } if (result == ISD200_GOOD) { isd200_build_sense(us, srb); srb->result = SAM_STAT_CHECK_CONDITION; /* If things are really okay, then let's show that */ if ((srb->sense_buffer[2] & 0xf) == 0x0) srb->result = SAM_STAT_GOOD; } else { srb->result = DID_ERROR << 16; /* Need reset here */ } } /* * Regardless of auto-sense, if we _know_ we have an error * condition, show that in the result code */ if (transferStatus == USB_STOR_TRANSPORT_FAILED) srb->result = SAM_STAT_CHECK_CONDITION; return; /* * abort processing: the bulk-only transport requires a reset * following an abort */ Handle_Abort: srb->result = DID_ABORT << 16; /* permit the reset transfer to take place */ clear_bit(US_FLIDX_ABORTING, &us->dflags); /* Need reset here */ } #ifdef CONFIG_USB_STORAGE_DEBUG static void isd200_log_config(struct us_data *us, struct isd200_info *info) { usb_stor_dbg(us, " Event Notification: 0x%x\n", info->ConfigData.EventNotification); usb_stor_dbg(us, " External Clock: 0x%x\n", info->ConfigData.ExternalClock); usb_stor_dbg(us, " ATA Init Timeout: 0x%x\n", info->ConfigData.ATAInitTimeout); usb_stor_dbg(us, " ATAPI Command Block Size: 0x%x\n", (info->ConfigData.ATAConfig & ATACFG_BLOCKSIZE) >> 6); usb_stor_dbg(us, " Master/Slave Selection: 0x%x\n", 
info->ConfigData.ATAConfig & ATACFG_MASTER); usb_stor_dbg(us, " ATAPI Reset: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_ATAPI_RESET); usb_stor_dbg(us, " ATA Timing: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_TIMING); usb_stor_dbg(us, " ATA Major Command: 0x%x\n", info->ConfigData.ATAMajorCommand); usb_stor_dbg(us, " ATA Minor Command: 0x%x\n", info->ConfigData.ATAMinorCommand); usb_stor_dbg(us, " Init Status: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_INIT_STATUS); usb_stor_dbg(us, " Config Descriptor 2: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_CONF_DESC2); usb_stor_dbg(us, " Skip Device Boot: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_SKIP_BOOT); usb_stor_dbg(us, " ATA 3 State Suspend: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_STATE_SUSPEND); usb_stor_dbg(us, " Descriptor Override: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_DESC_OVERRIDE); usb_stor_dbg(us, " Last LUN Identifier: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_LAST_LUN); usb_stor_dbg(us, " SRST Enable: 0x%x\n", info->ConfigData.ATAExtraConfig & CFG_CAPABILITY_SRST); } #endif /************************************************************************** * isd200_write_config * * Write the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_write_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; #ifdef CONFIG_USB_STORAGE_DEBUG usb_stor_dbg(us, "Entering isd200_write_config\n"); usb_stor_dbg(us, " Writing the following ISD200 Config Data:\n"); isd200_log_config(us, info); #endif /* let's send the command via the control pipe */ result = usb_stor_ctrl_transfer( us, us->send_ctrl_pipe, 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " ISD200 Config Data was written successfully\n"); } else { usb_stor_dbg(us, " Request to write ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_write_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_read_config * * Reads the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_read_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; usb_stor_dbg(us, "Entering isd200_read_config\n"); /* read the configuration information from ISD200. Use this to */ /* determine what the special ATA CDB bytes are. 
*/ result = usb_stor_ctrl_transfer( us, us->recv_ctrl_pipe, 0x02, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " Retrieved the following ISD200 Config Data:\n"); #ifdef CONFIG_USB_STORAGE_DEBUG isd200_log_config(us, info); #endif } else { usb_stor_dbg(us, " Request to get ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_read_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_atapi_soft_reset * * Perform an Atapi Soft Reset on the device * * RETURNS: * NT status code */ static int isd200_atapi_soft_reset( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_atapi_soft_reset\n"); transferStatus = isd200_action( us, ACTION_SOFT_RESET, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing Atapi Soft Reset\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_atapi_soft_reset %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_srst * * Perform an SRST on the device * * RETURNS: * ISD status code */ static int isd200_srst( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_SRST\n"); transferStatus = isd200_action( us, ACTION_RESET, NULL, 0 ); /* check to see if this request failed */ if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing SRST\n"); retStatus = ISD200_ERROR; } else { /* delay 10ms to give the drive a chance to see it */ msleep(10); transferStatus = isd200_action( us, ACTION_REENABLE, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error taking drive out of reset\n"); retStatus = ISD200_ERROR; } else { /* delay 50ms to give the drive a chance to recover after SRST */ msleep(50); } } usb_stor_dbg(us, "Leaving isd200_srst %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_try_enum * * Helper function for isd200_manual_enum(). Does ENUM and READ_STATUS * and tries to analyze the status registers * * RETURNS: * ISD status code */ static int isd200_try_enum(struct us_data *us, unsigned char master_slave, int detect ) { int status = ISD200_GOOD; unsigned long endTime; struct isd200_info *info = (struct isd200_info *)us->extra; unsigned char *regs = info->RegsBuf; int recheckAsMaster = 0; if ( detect ) endTime = jiffies + ISD200_ENUM_DETECT_TIMEOUT * HZ; else endTime = jiffies + ISD200_ENUM_BSY_TIMEOUT * HZ; /* loop until we detect !BSY or timeout */ while(1) { status = isd200_action( us, ACTION_ENUM, NULL, master_slave ); if ( status != ISD200_GOOD ) break; status = isd200_action( us, ACTION_READ_STATUS, regs, 8 ); if ( status != ISD200_GOOD ) break; if (!detect) { if (regs[ATA_REG_STATUS_OFFSET] & ATA_BUSY) { usb_stor_dbg(us, " %s status is still BSY, try again...\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? "Master" : "Slave"); } else { usb_stor_dbg(us, " %s status !BSY, continue with next operation\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? 
"Master" : "Slave"); break; } } /* check for ATA_BUSY and */ /* ATA_DF (workaround ATA Zip drive) and */ /* ATA_ERR (workaround for Archos CD-ROM) */ else if (regs[ATA_REG_STATUS_OFFSET] & (ATA_BUSY | ATA_DF | ATA_ERR)) { usb_stor_dbg(us, " Status indicates it is not ready, try again...\n"); } /* check for DRDY, ATA devices set DRDY after SRST */ else if (regs[ATA_REG_STATUS_OFFSET] & ATA_DRDY) { usb_stor_dbg(us, " Identified ATA device\n"); info->DeviceFlags |= DF_ATA_DEVICE; info->DeviceHead = master_slave; break; } /* * check Cylinder High/Low to * determine if it is an ATAPI device */ else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB && regs[ATA_REG_LCYL_OFFSET] == 0x14) { /* * It seems that the RICOH * MP6200A CD/RW drive will * report itself okay as a * slave when it is really a * master. So this check again * as a master device just to * make sure it doesn't report * itself okay as a master also */ if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) && !recheckAsMaster) { usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n"); recheckAsMaster = 1; master_slave = ATA_ADDRESS_DEVHEAD_STD; } else { usb_stor_dbg(us, " Identified ATAPI device\n"); info->DeviceHead = master_slave; status = isd200_atapi_soft_reset(us); break; } } else { usb_stor_dbg(us, " Not ATA, not ATAPI - Weird\n"); break; } /* check for timeout on this request */ if (time_after_eq(jiffies, endTime)) { if (!detect) usb_stor_dbg(us, " BSY check timeout, just continue with next operation...\n"); else usb_stor_dbg(us, " Device detect timeout!\n"); break; } } return status; } /************************************************************************** * isd200_manual_enum * * Determines if the drive attached is an ATA or ATAPI and if it is a * master or slave. * * RETURNS: * ISD status code */ static int isd200_manual_enum(struct us_data *us) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; usb_stor_dbg(us, "Entering isd200_manual_enum\n"); retStatus = isd200_read_config(us); if (retStatus == ISD200_GOOD) { int isslave; /* master or slave? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 0); if (retStatus == ISD200_GOOD) retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_SLAVE, 0); if (retStatus == ISD200_GOOD) { retStatus = isd200_srst(us); if (retStatus == ISD200_GOOD) /* ata or atapi? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 1); } isslave = (info->DeviceHead & ATA_ADDRESS_DEVHEAD_SLAVE) ? 
1 : 0; if (!(info->ConfigData.ATAConfig & ATACFG_MASTER)) { usb_stor_dbg(us, " Setting Master/Slave selection to %d\n", isslave); info->ConfigData.ATAConfig &= 0x3f; info->ConfigData.ATAConfig |= (isslave<<6); retStatus = isd200_write_config(us); } } usb_stor_dbg(us, "Leaving isd200_manual_enum %08X\n", retStatus); return(retStatus); } static void isd200_fix_driveid(u16 *id) { #ifndef __LITTLE_ENDIAN # ifdef __BIG_ENDIAN int i; for (i = 0; i < ATA_ID_WORDS; i++) id[i] = __le16_to_cpu(id[i]); # else # error "Please fix <asm/byteorder.h>" # endif #endif } static void isd200_dump_driveid(struct us_data *us, u16 *id) { usb_stor_dbg(us, " Identify Data Structure:\n"); usb_stor_dbg(us, " config = 0x%x\n", id[ATA_ID_CONFIG]); usb_stor_dbg(us, " cyls = 0x%x\n", id[ATA_ID_CYLS]); usb_stor_dbg(us, " heads = 0x%x\n", id[ATA_ID_HEADS]); usb_stor_dbg(us, " track_bytes = 0x%x\n", id[4]); usb_stor_dbg(us, " sector_bytes = 0x%x\n", id[5]); usb_stor_dbg(us, " sectors = 0x%x\n", id[ATA_ID_SECTORS]); usb_stor_dbg(us, " serial_no[0] = 0x%x\n", *(char *)&id[ATA_ID_SERNO]); usb_stor_dbg(us, " buf_type = 0x%x\n", id[20]); usb_stor_dbg(us, " buf_size = 0x%x\n", id[ATA_ID_BUF_SIZE]); usb_stor_dbg(us, " ecc_bytes = 0x%x\n", id[22]); usb_stor_dbg(us, " fw_rev[0] = 0x%x\n", *(char *)&id[ATA_ID_FW_REV]); usb_stor_dbg(us, " model[0] = 0x%x\n", *(char *)&id[ATA_ID_PROD]); usb_stor_dbg(us, " max_multsect = 0x%x\n", id[ATA_ID_MAX_MULTSECT] & 0xff); usb_stor_dbg(us, " dword_io = 0x%x\n", id[ATA_ID_DWORD_IO]); usb_stor_dbg(us, " capability = 0x%x\n", id[ATA_ID_CAPABILITY] >> 8); usb_stor_dbg(us, " tPIO = 0x%x\n", id[ATA_ID_OLD_PIO_MODES] >> 8); usb_stor_dbg(us, " tDMA = 0x%x\n", id[ATA_ID_OLD_DMA_MODES] >> 8); usb_stor_dbg(us, " field_valid = 0x%x\n", id[ATA_ID_FIELD_VALID]); usb_stor_dbg(us, " cur_cyls = 0x%x\n", id[ATA_ID_CUR_CYLS]); usb_stor_dbg(us, " cur_heads = 0x%x\n", id[ATA_ID_CUR_HEADS]); usb_stor_dbg(us, " cur_sectors = 0x%x\n", id[ATA_ID_CUR_SECTORS]); usb_stor_dbg(us, " cur_capacity = 0x%x\n", ata_id_u32(id, 57)); usb_stor_dbg(us, " multsect = 0x%x\n", id[ATA_ID_MULTSECT] & 0xff); usb_stor_dbg(us, " lba_capacity = 0x%x\n", ata_id_u32(id, ATA_ID_LBA_CAPACITY)); usb_stor_dbg(us, " command_set_1 = 0x%x\n", id[ATA_ID_COMMAND_SET_1]); usb_stor_dbg(us, " command_set_2 = 0x%x\n", id[ATA_ID_COMMAND_SET_2]); } /************************************************************************** * isd200_get_inquiry_data * * Get inquiry data * * RETURNS: * ISD status code */ static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); /* set default to Master */ info->DeviceHead = ATA_ADDRESS_DEVHEAD_STD; /* attempt to manually enumerate this device */ retStatus = isd200_manual_enum(us); if (retStatus == ISD200_GOOD) { int transferStatus; /* check for an ATA device */ if (info->DeviceFlags & DF_ATA_DEVICE) { /* this must be an ATA device */ /* perform an ATA Command Identify */ transferStatus = isd200_action( us, ACTION_IDENTIFY, id, ATA_ID_WORDS * 2); if (transferStatus != ISD200_TRANSPORT_GOOD) { /* Error issuing ATA Command Identify */ usb_stor_dbg(us, " Error issuing ATA Command Identify\n"); retStatus = ISD200_ERROR; } else { /* ATA Command Identify successful */ int i; __be16 *src; __u16 *dest; isd200_fix_driveid(id); isd200_dump_driveid(us, id); /* Prevent division by 0 in isd200_scsi_to_ata() */ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { usb_stor_dbg(us, " Invalid 
ATA Identify data\n"); retStatus = ISD200_ERROR; goto Done; } memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE; /* The length must be at least 36 (5 + 31) */ info->InquiryData.AdditionalLength = 0x1F; if (id[ATA_ID_COMMAND_SET_1] & COMMANDSET_MEDIA_STATUS) { /* set the removable bit */ info->InquiryData.DeviceTypeModifier = DEVICE_REMOVABLE; info->DeviceFlags |= DF_REMOVABLE_MEDIA; } /* Fill in vendor identification fields */ src = (__be16 *)&id[ATA_ID_PROD]; dest = (__u16*)info->InquiryData.VendorId; for (i = 0; i < 4; i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_PROD + 8/2]; dest = (__u16*)info->InquiryData.ProductId; for (i=0;i<8;i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_FW_REV]; dest = (__u16*)info->InquiryData.ProductRevisionLevel; for (i=0;i<2;i++) dest[i] = be16_to_cpu(src[i]); /* determine if it supports Media Status Notification */ if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) { usb_stor_dbg(us, " Device supports Media Status Notification\n"); /* * Indicate that it is enabled, even * though it is not. * This allows the lock/unlock of the * media to work correctly. */ info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED; } else info->DeviceFlags &= ~DF_MEDIA_STATUS_ENABLED; } } else { /* * this must be an ATAPI device * use an ATAPI protocol (Transparent SCSI) */ us->protocol_name = "Transparent SCSI"; us->proto_handler = usb_stor_transparent_scsi_command; usb_stor_dbg(us, "Protocol changed to: %s\n", us->protocol_name); /* Free driver structure */ us->extra_destructor(info); kfree(info); us->extra = NULL; us->extra_destructor = NULL; } } Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); } /************************************************************************** * isd200_scsi_to_ata * * Translate SCSI commands to ATA commands. 
* * RETURNS: * 1 if the command needs to be sent to the transport layer * 0 otherwise */ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us, union ata_cdb * ataCdb) { struct isd200_info *info = (struct isd200_info *)us->extra; u16 *id = info->id; int sendToTransport = 1; unsigned char sectnum, head; unsigned short cylinder; unsigned long lba; unsigned long blockCount; unsigned char senseData[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; memset(ataCdb, 0, sizeof(union ata_cdb)); /* SCSI Command */ switch (srb->cmnd[0]) { case INQUIRY: usb_stor_dbg(us, " ATA OUT - INQUIRY\n"); /* copy InquiryData */ usb_stor_set_xfer_buf((unsigned char *) &info->InquiryData, sizeof(info->InquiryData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; break; case MODE_SENSE: usb_stor_dbg(us, " ATA OUT - SCSIOP_MODE_SENSE\n"); /* Initialize the return buffer */ usb_stor_set_xfer_buf(senseData, sizeof(senseData), srb); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case TEST_UNIT_READY: usb_stor_dbg(us, " ATA OUT - SCSIOP_TEST_UNIT_READY\n"); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_CAPACITY: { unsigned long capacity; struct read_capacity_data readCapacityData; usb_stor_dbg(us, " ATA OUT - SCSIOP_READ_CAPACITY\n"); if (ata_id_has_lba(id)) capacity = ata_id_u32(id, ATA_ID_LBA_CAPACITY) - 1; else capacity = (id[ATA_ID_HEADS] * id[ATA_ID_CYLS] * id[ATA_ID_SECTORS]) - 1; readCapacityData.LogicalBlockAddress = cpu_to_be32(capacity); readCapacityData.BytesPerBlock = cpu_to_be32(0x200); usb_stor_set_xfer_buf((unsigned char *) &readCapacityData, sizeof(readCapacityData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_READ\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned 
char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_READ; break; case WRITE_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_WRITE\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_WRITE; break; case ALLOW_MEDIUM_REMOVAL: usb_stor_dbg(us, " ATA OUT - SCSIOP_MEDIUM_REMOVAL\n"); if (info->DeviceFlags & DF_REMOVABLE_MEDIA) { usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = (srb->cmnd[4] & 0x1) ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Not removable media, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case START_STOP: usb_stor_dbg(us, " ATA OUT - SCSIOP_START_STOP_UNIT\n"); usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); if ((srb->cmnd[4] & 0x3) == 0x2) { usb_stor_dbg(us, " Media Eject\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 0; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_MEDIA_EJECT; } else if ((srb->cmnd[4] & 0x3) == 0x1) { usb_stor_dbg(us, " Get Media Status\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Nothing to do, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; default: usb_stor_dbg(us, "Unsupported SCSI command - 0x%X\n", srb->cmnd[0]); srb->result = DID_ERROR << 16; sendToTransport = 0; break; } return(sendToTransport); } /************************************************************************** * isd200_free_info * * Frees the driver structure. 
*/ static void isd200_free_info_ptrs(void *info_) { struct isd200_info *info = (struct isd200_info *) info_; if (info) { kfree(info->id); kfree(info->RegsBuf); kfree(info->srb.sense_buffer); } } /************************************************************************** * isd200_init_info * * Allocates (if necessary) and initializes the driver structure. * * RETURNS: * error status code */ static int isd200_init_info(struct us_data *us) { struct isd200_info *info; info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL); if (!info) return -ENOMEM; info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL); info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL); info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) { isd200_free_info_ptrs(info); kfree(info); return -ENOMEM; } us->extra = info; us->extra_destructor = isd200_free_info_ptrs; return 0; } /************************************************************************** * Initialization for the ISD200 */ static int isd200_Initialization(struct us_data *us) { int rc = 0; usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); rc = -ENOMEM; } else { /* Get device specific data */ if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); rc = -EINVAL; } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); } } return rc; } /************************************************************************** * Protocol and Transport for the ISD200 ASIC * * This protocol and transport are for ATA devices connected to an ISD200 * ASIC. An ATAPI device that is connected as a slave device will be * detected in the driver initialization function and the protocol will * be changed to an ATAPI protocol (Transparent SCSI). * */ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) { int sendToTransport, orig_bufflen; union ata_cdb ataCdb; /* Make sure driver was initialized */ if (us->extra == NULL) { usb_stor_dbg(us, "ERROR Driver not initialized\n"); srb->result = DID_ERROR << 16; return; } scsi_set_resid(srb, 0); /* scsi_bufflen might change in protocol translation to ata */ orig_bufflen = scsi_bufflen(srb); sendToTransport = isd200_scsi_to_ata(srb, us, &ataCdb); /* send the command to the transport layer */ if (sendToTransport) isd200_invoke_transport(us, srb, &ataCdb); isd200_srb_set_bufflen(srb, orig_bufflen); } static struct scsi_host_template isd200_host_template; static int isd200_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - isd200_usb_ids) + isd200_unusual_dev_list, &isd200_host_template); if (result) return result; us->protocol_name = "ISD200 ATA/ATAPI"; us->proto_handler = isd200_ata_command; result = usb_stor_probe2(us); return result; } static struct usb_driver isd200_driver = { .name = DRV_NAME, .probe = isd200_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = isd200_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(isd200_driver, isd200_host_template, DRV_NAME);
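The header comment above notes that the ISD200 ASIC has no native ATA port and instead routes ATA commands through its ATA Command Block (ATACB), and isd200_scsi_to_ata() shows the CHS fallback it uses for drives without LBA support. The standalone sketch below is not part of the driver; it only reproduces that LBA-to-CHS arithmetic with the same formulas so the translation is easy to verify in isolation. The helper name chs_from_lba and the geometry values are hypothetical; the real driver takes heads and sectors-per-track from the ATA IDENTIFY data (ATA_ID_HEADS, ATA_ID_SECTORS).

/*
 * Illustration only (not driver code): the LBA -> CHS translation used by
 * the non-LBA branch of isd200_scsi_to_ata().
 */
#include <stdio.h>

struct chs {
	unsigned char  sectnum;   /* ATA sector numbers are 1-based */
	unsigned short cylinder;
	unsigned char  head;
};

/* Same arithmetic as isd200_scsi_to_ata() when ata_id_has_lba() is false */
static struct chs chs_from_lba(unsigned long lba,
			       unsigned int heads, unsigned int sectors)
{
	struct chs out;

	out.sectnum  = (unsigned char)((lba % sectors) + 1);
	out.cylinder = (unsigned short)(lba / (sectors * heads));
	out.head     = (unsigned char)((lba / sectors) % heads);
	return out;
}

int main(void)
{
	/* hypothetical geometry: 16 heads, 63 sectors per track */
	struct chs c = chs_from_lba(100000UL, 16, 63);

	printf("C/H/S = %u/%u/%u\n", c.cylinder, c.head, c.sectnum);
	return 0;
}

In the driver itself the resulting values land in the ATACB's write view (SectorNumberByte, CylinderLowByte/CylinderHighByte, DeviceHeadByte) before the CDB is handed to usb_stor_Bulk_transport().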
// SPDX-License-Identifier: GPL-2.0-or-later
/****************************************************************

Siano Mobile Silicon, Inc.
MDTV receiver kernel modules.
Copyright (C) 2005-2009, Uri Shkolnik, Anatoly Greenblat ****************************************************************/ #include "smscoreapi.h" #include <linux/kernel.h> #include <linux/init.h> #include <linux/usb.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/module.h> #include <media/media-device.h> #include "sms-cards.h" #include "smsendian.h" #define USB1_BUFFER_SIZE 0x1000 #define USB2_BUFFER_SIZE 0x2000 #define MAX_BUFFERS 50 #define MAX_URBS 10 struct smsusb_device_t; enum smsusb_state { SMSUSB_DISCONNECTED, SMSUSB_SUSPENDED, SMSUSB_ACTIVE }; struct smsusb_urb_t { struct list_head entry; struct smscore_buffer_t *cb; struct smsusb_device_t *dev; struct urb *urb; /* For the bottom half */ struct work_struct wq; }; struct smsusb_device_t { struct usb_device *udev; struct smscore_device_t *coredev; struct smsusb_urb_t surbs[MAX_URBS]; int response_alignment; int buffer_size; unsigned char in_ep; unsigned char out_ep; enum smsusb_state state; }; static int smsusb_submit_urb(struct smsusb_device_t *dev, struct smsusb_urb_t *surb); /* * Completing URB's callback handler - bottom half (process context) * submits the URB prepared on smsusb_onresponse() */ static void do_submit_urb(struct work_struct *work) { struct smsusb_urb_t *surb = container_of(work, struct smsusb_urb_t, wq); struct smsusb_device_t *dev = surb->dev; smsusb_submit_urb(dev, surb); } /* * Completing URB's callback handler - top half (interrupt context) * adds completing sms urb to the global surbs list and activtes the worker * thread the surb * IMPORTANT - blocking functions must not be called from here !!! * @param urb pointer to a completing urb object */ static void smsusb_onresponse(struct urb *urb) { struct smsusb_urb_t *surb = (struct smsusb_urb_t *) urb->context; struct smsusb_device_t *dev = surb->dev; if (urb->status == -ESHUTDOWN) { pr_err("error, urb status %d (-ESHUTDOWN), %d bytes\n", urb->status, urb->actual_length); return; } if ((urb->actual_length > 0) && (urb->status == 0)) { struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)surb->cb->p; smsendian_handle_message_header(phdr); if (urb->actual_length >= phdr->msg_length) { surb->cb->size = phdr->msg_length; if (dev->response_alignment && (phdr->msg_flags & MSG_HDR_FLAG_SPLIT_MSG)) { surb->cb->offset = dev->response_alignment + ((phdr->msg_flags >> 8) & 3); /* sanity check */ if (((int) phdr->msg_length + surb->cb->offset) > urb->actual_length) { pr_err("invalid response msglen %d offset %d size %d\n", phdr->msg_length, surb->cb->offset, urb->actual_length); goto exit_and_resubmit; } /* move buffer pointer and * copy header to its new location */ memcpy((char *) phdr + surb->cb->offset, phdr, sizeof(struct sms_msg_hdr)); } else surb->cb->offset = 0; pr_debug("received %s(%d) size: %d\n", smscore_translate_msg(phdr->msg_type), phdr->msg_type, phdr->msg_length); smsendian_handle_rx_message((struct sms_msg_data *) phdr); smscore_onresponse(dev->coredev, surb->cb); surb->cb = NULL; } else { pr_err("invalid response msglen %d actual %d\n", phdr->msg_length, urb->actual_length); } } else pr_err("error, urb status %d, %d bytes\n", urb->status, urb->actual_length); exit_and_resubmit: INIT_WORK(&surb->wq, do_submit_urb); schedule_work(&surb->wq); } static int smsusb_submit_urb(struct smsusb_device_t *dev, struct smsusb_urb_t *surb) { if (!surb->cb) { /* This function can sleep */ surb->cb = smscore_getbuffer(dev->coredev); if (!surb->cb) { pr_err("smscore_getbuffer(...) 
returned NULL\n"); return -ENOMEM; } } usb_fill_bulk_urb( surb->urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->in_ep), surb->cb->p, dev->buffer_size, smsusb_onresponse, surb ); surb->urb->transfer_flags |= URB_FREE_BUFFER; return usb_submit_urb(surb->urb, GFP_ATOMIC); } static void smsusb_stop_streaming(struct smsusb_device_t *dev) { int i; for (i = 0; i < MAX_URBS; i++) { usb_kill_urb(dev->surbs[i].urb); if (dev->surbs[i].wq.func) cancel_work_sync(&dev->surbs[i].wq); if (dev->surbs[i].cb) { smscore_putbuffer(dev->coredev, dev->surbs[i].cb); dev->surbs[i].cb = NULL; } } } static int smsusb_start_streaming(struct smsusb_device_t *dev) { int i, rc; for (i = 0; i < MAX_URBS; i++) { rc = smsusb_submit_urb(dev, &dev->surbs[i]); if (rc < 0) { pr_err("smsusb_submit_urb(...) failed\n"); smsusb_stop_streaming(dev); break; } } return rc; } static int smsusb_sendrequest(void *context, void *buffer, size_t size) { struct smsusb_device_t *dev = (struct smsusb_device_t *) context; struct sms_msg_hdr *phdr; int dummy, ret; if (dev->state != SMSUSB_ACTIVE) { pr_debug("Device not active yet\n"); return -ENOENT; } phdr = kmemdup(buffer, size, GFP_KERNEL); if (!phdr) return -ENOMEM; pr_debug("sending %s(%d) size: %d\n", smscore_translate_msg(phdr->msg_type), phdr->msg_type, phdr->msg_length); smsendian_handle_tx_message((struct sms_msg_data *) phdr); smsendian_handle_message_header((struct sms_msg_hdr *)phdr); ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), phdr, size, &dummy, 1000); kfree(phdr); return ret; } static char *smsusb1_fw_lkup[] = { "dvbt_stellar_usb.inp", "dvbh_stellar_usb.inp", "tdmb_stellar_usb.inp", "none", "dvbt_bda_stellar_usb.inp", }; static inline char *sms_get_fw_name(int mode, int board_id) { char **fw = sms_get_board(board_id)->fw; return (fw && fw[mode]) ? 
fw[mode] : smsusb1_fw_lkup[mode]; } static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id) { const struct firmware *fw; u8 *fw_buffer; int rc, dummy; char *fw_filename; if (id < 0) id = sms_get_board(board_id)->default_mode; if (id < DEVICE_MODE_DVBT || id > DEVICE_MODE_DVBT_BDA) { pr_err("invalid firmware id specified %d\n", id); return -EINVAL; } fw_filename = sms_get_fw_name(id, board_id); rc = request_firmware(&fw, fw_filename, &udev->dev); if (rc < 0) { pr_warn("failed to open '%s' mode %d, trying again with default firmware\n", fw_filename, id); fw_filename = smsusb1_fw_lkup[id]; rc = request_firmware(&fw, fw_filename, &udev->dev); if (rc < 0) { pr_warn("failed to open '%s' mode %d\n", fw_filename, id); return rc; } } fw_buffer = kmemdup(fw->data, fw->size, GFP_KERNEL); if (fw_buffer) { rc = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2), fw_buffer, fw->size, &dummy, 1000); pr_debug("sent %zu(%d) bytes, rc %d\n", fw->size, dummy, rc); kfree(fw_buffer); } else { pr_err("failed to allocate firmware buffer\n"); rc = -ENOMEM; } pr_debug("read FW %s, size=%zu\n", fw_filename, fw->size); release_firmware(fw); return rc; } static void smsusb1_detectmode(void *context, int *mode) { char *product_string = ((struct smsusb_device_t *) context)->udev->product; *mode = DEVICE_MODE_NONE; if (!product_string) { product_string = "none"; pr_err("product string not found\n"); } else if (strstr(product_string, "DVBH")) *mode = 1; else if (strstr(product_string, "BDA")) *mode = 4; else if (strstr(product_string, "DVBT")) *mode = 0; else if (strstr(product_string, "TDMB")) *mode = 2; pr_debug("%d \"%s\"\n", *mode, product_string); } static int smsusb1_setmode(void *context, int mode) { struct sms_msg_hdr msg = { MSG_SW_RELOAD_REQ, 0, HIF_TASK, sizeof(struct sms_msg_hdr), 0 }; if (mode < DEVICE_MODE_DVBT || mode > DEVICE_MODE_DVBT_BDA) { pr_err("invalid firmware id specified %d\n", mode); return -EINVAL; } return smsusb_sendrequest(context, &msg, sizeof(msg)); } static void smsusb_term_device(struct usb_interface *intf) { struct smsusb_device_t *dev = usb_get_intfdata(intf); if (dev) { int i; dev->state = SMSUSB_DISCONNECTED; smsusb_stop_streaming(dev); /* unregister from smscore */ if (dev->coredev) smscore_unregister_device(dev->coredev); for (i = 0; i < MAX_URBS; i++) usb_free_urb(dev->surbs[i].urb); pr_debug("device 0x%p destroyed\n", dev); kfree(dev); } usb_set_intfdata(intf, NULL); } static void *siano_media_device_register(struct smsusb_device_t *dev, int board_id) { #ifdef CONFIG_MEDIA_CONTROLLER_DVB struct media_device *mdev; struct usb_device *udev = dev->udev; struct sms_board *board = sms_get_board(board_id); int ret; mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return NULL; media_device_usb_init(mdev, udev, board->name); ret = media_device_register(mdev); if (ret) { media_device_cleanup(mdev); kfree(mdev); return NULL; } pr_info("media controller created\n"); return mdev; #else return NULL; #endif } static int smsusb_init_device(struct usb_interface *intf, int board_id) { struct smsdevice_params_t params; struct smsusb_device_t *dev; void *mdev; int i, rc; int align = 0; /* create device object */ dev = kzalloc(sizeof(struct smsusb_device_t), GFP_KERNEL); if (!dev) return -ENOMEM; memset(&params, 0, sizeof(params)); usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); dev->state = SMSUSB_DISCONNECTED; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *desc = &intf->cur_altsetting->endpoint[i].desc; if 
(desc->bEndpointAddress & USB_DIR_IN) { dev->in_ep = desc->bEndpointAddress; align = usb_endpoint_maxp(desc) - sizeof(struct sms_msg_hdr); } else { dev->out_ep = desc->bEndpointAddress; } } pr_debug("in_ep = %02x, out_ep = %02x\n", dev->in_ep, dev->out_ep); if (!dev->in_ep || !dev->out_ep || align < 0) { /* Missing endpoints? */ smsusb_term_device(intf); return -ENODEV; } params.device_type = sms_get_board(board_id)->type; switch (params.device_type) { case SMS_STELLAR: dev->buffer_size = USB1_BUFFER_SIZE; params.setmode_handler = smsusb1_setmode; params.detectmode_handler = smsusb1_detectmode; break; case SMS_UNKNOWN_TYPE: pr_err("Unspecified sms device type!\n"); fallthrough; default: dev->buffer_size = USB2_BUFFER_SIZE; dev->response_alignment = align; params.flags |= SMS_DEVICE_FAMILY2; break; } params.device = &dev->udev->dev; params.usb_device = dev->udev; params.buffer_size = dev->buffer_size; params.num_buffers = MAX_BUFFERS; params.sendrequest_handler = smsusb_sendrequest; params.context = dev; usb_make_path(dev->udev, params.devpath, sizeof(params.devpath)); mdev = siano_media_device_register(dev, board_id); /* register in smscore */ rc = smscore_register_device(&params, &dev->coredev, 0, mdev); if (rc < 0) { pr_err("smscore_register_device(...) failed, rc %d\n", rc); goto err_unregister_device; } smscore_set_board_id(dev->coredev, board_id); dev->coredev->is_usb_device = true; /* initialize urbs */ for (i = 0; i < MAX_URBS; i++) { dev->surbs[i].dev = dev; dev->surbs[i].urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->surbs[i].urb) goto err_unregister_device; } pr_debug("smsusb_start_streaming(...).\n"); rc = smsusb_start_streaming(dev); if (rc < 0) { pr_err("smsusb_start_streaming(...) failed\n"); goto err_unregister_device; } dev->state = SMSUSB_ACTIVE; rc = smscore_start_device(dev->coredev); if (rc < 0) { pr_err("smscore_start_device(...) failed\n"); goto err_unregister_device; } pr_debug("device 0x%p created\n", dev); return rc; err_unregister_device: /* smsusb_term_device() frees any allocated urb. */ smsusb_term_device(intf); #ifdef CONFIG_MEDIA_CONTROLLER_DVB media_device_unregister(mdev); #endif kfree(mdev); return rc; } static int smsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); char devpath[32]; int i, rc; pr_info("board id=%lu, interface number %d\n", id->driver_info, intf->cur_altsetting->desc.bInterfaceNumber); if (sms_get_board(id->driver_info)->intf_num != intf->cur_altsetting->desc.bInterfaceNumber) { pr_debug("interface %d won't be used. 
Expecting interface %d to popup\n", intf->cur_altsetting->desc.bInterfaceNumber, sms_get_board(id->driver_info)->intf_num); return -ENODEV; } if (intf->num_altsetting > 1) { rc = usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 0); if (rc < 0) { pr_err("usb_set_interface failed, rc %d\n", rc); return rc; } } pr_debug("smsusb_probe %d\n", intf->cur_altsetting->desc.bInterfaceNumber); for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { pr_debug("endpoint %d %02x %02x %d\n", i, intf->cur_altsetting->endpoint[i].desc.bEndpointAddress, intf->cur_altsetting->endpoint[i].desc.bmAttributes, intf->cur_altsetting->endpoint[i].desc.wMaxPacketSize); if (intf->cur_altsetting->endpoint[i].desc.bEndpointAddress & USB_DIR_IN) rc = usb_clear_halt(udev, usb_rcvbulkpipe(udev, intf->cur_altsetting->endpoint[i].desc.bEndpointAddress)); else rc = usb_clear_halt(udev, usb_sndbulkpipe(udev, intf->cur_altsetting->endpoint[i].desc.bEndpointAddress)); } if ((udev->actconfig->desc.bNumInterfaces == 2) && (intf->cur_altsetting->desc.bInterfaceNumber == 0)) { pr_debug("rom interface 0 is not used\n"); return -ENODEV; } if (id->driver_info == SMS1XXX_BOARD_SIANO_STELLAR_ROM) { /* Detected a Siano Stellar uninitialized */ snprintf(devpath, sizeof(devpath), "usb\\%d-%s", udev->bus->busnum, udev->devpath); pr_info("stellar device in cold state was found at %s.\n", devpath); rc = smsusb1_load_firmware( udev, smscore_registry_getmode(devpath), id->driver_info); /* This device will reset and gain another USB ID */ if (!rc) pr_info("stellar device now in warm state\n"); else pr_err("Failed to put stellar in warm state. Error: %d\n", rc); return rc; } else { rc = smsusb_init_device(intf, id->driver_info); } pr_info("Device initialized with return code %d\n", rc); sms_board_load_modules(id->driver_info); return rc; } static void smsusb_disconnect(struct usb_interface *intf) { smsusb_term_device(intf); } static int smsusb_suspend(struct usb_interface *intf, pm_message_t msg) { struct smsusb_device_t *dev = usb_get_intfdata(intf); printk(KERN_INFO "%s Entering status %d.\n", __func__, msg.event); dev->state = SMSUSB_SUSPENDED; /*smscore_set_power_mode(dev, SMS_POWER_MODE_SUSPENDED);*/ smsusb_stop_streaming(dev); return 0; } static int smsusb_resume(struct usb_interface *intf) { int rc, i; struct smsusb_device_t *dev = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); printk(KERN_INFO "%s Entering.\n", __func__); usb_clear_halt(udev, usb_rcvbulkpipe(udev, dev->in_ep)); usb_clear_halt(udev, usb_sndbulkpipe(udev, dev->out_ep)); for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) printk(KERN_INFO "endpoint %d %02x %02x %d\n", i, intf->cur_altsetting->endpoint[i].desc.bEndpointAddress, intf->cur_altsetting->endpoint[i].desc.bmAttributes, intf->cur_altsetting->endpoint[i].desc.wMaxPacketSize); if (intf->num_altsetting > 0) { rc = usb_set_interface(udev, intf->cur_altsetting->desc. 
bInterfaceNumber, 0); if (rc < 0) { printk(KERN_INFO "%s usb_set_interface failed, rc %d\n", __func__, rc); return rc; } } smsusb_start_streaming(dev); return 0; } static const struct usb_device_id smsusb_id_table[] = { /* This device is only present before firmware load */ { USB_DEVICE(0x187f, 0x0010), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR_ROM }, /* This device pops up after firmware load */ { USB_DEVICE(0x187f, 0x0100), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, { USB_DEVICE(0x187f, 0x0200), .driver_info = SMS1XXX_BOARD_SIANO_NOVA_A }, { USB_DEVICE(0x187f, 0x0201), .driver_info = SMS1XXX_BOARD_SIANO_NOVA_B }, { USB_DEVICE(0x187f, 0x0300), .driver_info = SMS1XXX_BOARD_SIANO_VEGA }, { USB_DEVICE(0x2040, 0x1700), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT }, { USB_DEVICE(0x2040, 0x1800), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A }, { USB_DEVICE(0x2040, 0x1801), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B }, { USB_DEVICE(0x2040, 0x2000), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD }, { USB_DEVICE(0x2040, 0x2009), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 }, { USB_DEVICE(0x2040, 0x200a), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD }, { USB_DEVICE(0x2040, 0x2010), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD }, { USB_DEVICE(0x2040, 0x2011), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD }, { USB_DEVICE(0x2040, 0x2019), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD }, { USB_DEVICE(0x2040, 0x5500), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0x5510), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0x5520), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0x5530), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0x5580), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0x5590), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xb900), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xb910), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xb980), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xb990), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xc000), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xc010), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xc080), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xc090), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xc0a0), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xf5a0), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x187f, 0x0202), .driver_info = SMS1XXX_BOARD_SIANO_NICE }, { USB_DEVICE(0x187f, 0x0301), .driver_info = SMS1XXX_BOARD_SIANO_VENICE }, { USB_DEVICE(0x187f, 0x0302), .driver_info = SMS1XXX_BOARD_SIANO_VENICE }, { USB_DEVICE(0x187f, 0x0310), .driver_info = SMS1XXX_BOARD_SIANO_MING }, { USB_DEVICE(0x187f, 0x0500), .driver_info = SMS1XXX_BOARD_SIANO_PELE }, { USB_DEVICE(0x187f, 0x0600), .driver_info = SMS1XXX_BOARD_SIANO_RIO }, { USB_DEVICE(0x187f, 0x0700), .driver_info = SMS1XXX_BOARD_SIANO_DENVER_2160 }, { USB_DEVICE(0x187f, 0x0800), .driver_info = SMS1XXX_BOARD_SIANO_DENVER_1530 }, { USB_DEVICE(0x19D2, 0x0086), .driver_info = SMS1XXX_BOARD_ZTE_DVB_DATA_CARD }, { USB_DEVICE(0x19D2, 0x0078), .driver_info = SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD }, { USB_DEVICE(0x3275, 0x0080), .driver_info = 
SMS1XXX_BOARD_SIANO_RIO }, { USB_DEVICE(0x2013, 0x0257), .driver_info = SMS1XXX_BOARD_PCTV_77E }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, smsusb_id_table); static struct usb_driver smsusb_driver = { .name = "smsusb", .probe = smsusb_probe, .disconnect = smsusb_disconnect, .id_table = smsusb_id_table, .suspend = smsusb_suspend, .resume = smsusb_resume, }; module_usb_driver(smsusb_driver); MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle"); MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>"); MODULE_LICENSE("GPL");
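The smsusb probe path above is driven entirely by the .driver_info member of the matched usb_device_id entry: it indexes sms_get_board(), and the SMS1XXX_BOARD_SIANO_STELLAR_ROM value marks a "cold" ROM device that only gets firmware loaded before it resets and re-enumerates under a new product ID. A minimal sketch of that same table-driven pattern, using hypothetical vendor/product IDs, board constants and driver name (not the real Siano ones), could look like this:

/*
 * Illustrative sketch only -- hypothetical IDs, board constants and driver
 * name; not part of smsusb, it just shows the .driver_info dispatch pattern
 * used above.
 */
#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_BOARD_COLD	0	/* ROM device: load firmware, then it re-enumerates */
#define EXAMPLE_BOARD_WARM	1	/* firmware already running: do normal init */

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x0010), .driver_info = EXAMPLE_BOARD_COLD },
	{ USB_DEVICE(0x1234, 0x0100), .driver_info = EXAMPLE_BOARD_WARM },
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static int example_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	/* id points at the table entry that matched, so driver_info selects the board. */
	if (id->driver_info == EXAMPLE_BOARD_COLD) {
		/* firmware upload would go here; the device then resets and re-enumerates */
		return 0;
	}
	/* normal initialisation for the warm device */
	return 0;
}

static void example_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver example_driver = {
	.name		= "example_usb",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_usb_driver(example_driver);
MODULE_DESCRIPTION("Example of usb_device_id driver_info dispatch");
MODULE_LICENSE("GPL");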
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for some belkin "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #define BELKIN_HIDDEV 0x01 #define BELKIN_WKBD 0x02 #define belkin_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int belkin_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER || !(quirks & BELKIN_WKBD)) return 0; switch (usage->hid & HID_USAGE) { case 0x03a: belkin_map_key_clear(KEY_SOUND); break; case 0x03b: belkin_map_key_clear(KEY_CAMERA); break; case 0x03c: belkin_map_key_clear(KEY_DOCUMENTS); break; default: return 0; } return 1; } static int belkin_probe(struct hid_device *hdev, const struct hid_device_id *id) { unsigned long quirks = id->driver_data; int ret; hid_set_drvdata(hdev, (void *)quirks); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | ((quirks & BELKIN_HIDDEV) ? HID_CONNECT_HIDDEV_FORCE : 0)); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 0; err_free: return ret; } static const struct hid_device_id belkin_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM), .driver_data = BELKIN_HIDDEV }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD), .driver_data = BELKIN_WKBD }, { } }; MODULE_DEVICE_TABLE(hid, belkin_devices); static struct hid_driver belkin_driver = { .name = "belkin", .id_table = belkin_devices, .input_mapping = belkin_input_mapping, .probe = belkin_probe, }; module_hid_driver(belkin_driver); MODULE_DESCRIPTION("HID driver for some belkin \"special\" devices"); MODULE_LICENSE("GPL");
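One detail worth calling out in the belkin driver is the ->input_mapping() return convention it relies on: a positive return tells hid-input the usage was mapped here and the default mapping should be skipped, zero falls through to the generic mapping, and a negative value drops the usage. A stripped-down sketch of the same idea, with a hypothetical function name and only one remapped usage, assuming the same hid_map_usage_clear() helper:

/* Illustrative sketch only; not part of hid-belkin. */
#include <linux/hid.h>
#include <linux/input.h>

static int example_input_mapping(struct hid_device *hdev, struct hid_input *hi,
				 struct hid_field *field, struct hid_usage *usage,
				 unsigned long **bit, int *max)
{
	/* Only consumer-page usages are interesting to this sketch. */
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;			/* fall through to the generic HID mapping */

	if ((usage->hid & HID_USAGE) == 0x03a) {	/* same usage hid-belkin remaps */
		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_SOUND);
		return 1;			/* mapped here; core skips its default mapping */
	}
	return 0;
}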
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NET_IP6_ROUTE_H #define _NET_IP6_ROUTE_H #include <net/addrconf.h> #include <net/flow.h> #include <net/ip6_fib.h> #include <net/sock.h> #include <net/lwtunnel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/route.h> #include <net/nexthop.h> struct route_info { __u8 type; __u8 length; __u8 prefix_len; #if defined(__BIG_ENDIAN_BITFIELD) __u8 reserved_h:3, route_pref:2, reserved_l:3; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 reserved_l:3, route_pref:2, reserved_h:3; #endif __be32 lifetime; __u8 prefix[]; /* 0,8 or 16 */ }; #define RT6_LOOKUP_F_IFACE 0x00000001 #define RT6_LOOKUP_F_REACHABLE 0x00000002 #define RT6_LOOKUP_F_HAS_SADDR 0x00000004 #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 #define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040 #define RT6_LOOKUP_F_DST_NOREF 0x00000080 /* We do not (yet ?) support IPv6 jumbograms (RFC 2675) * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header */ #define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr)) /* * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate * between IPV6_ADDR_PREFERENCES socket option values * IPV6_PREFER_SRC_TMP = 0x1 * IPV6_PREFER_SRC_PUBLIC = 0x2 * IPV6_PREFER_SRC_COA = 0x4 * and above RT6_LOOKUP_F_SRCPREF_xxx flags.
*/ static inline int rt6_srcprefs2flags(unsigned int srcprefs) { return (srcprefs & IPV6_PREFER_SRC_MASK) << 3; } static inline unsigned int rt6_flags2srcprefs(int flags) { return (flags >> 3) & IPV6_PREFER_SRC_MASK; } static inline bool rt6_need_strict(const struct in6_addr *daddr) { return ipv6_addr_type(daddr) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } /* fib entries using a nexthop object can not be coalesced into * a multipath route */ static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i) { /* the RTF_ADDRCONF flag filters out RA's */ return !(f6i->fib6_flags & RTF_ADDRCONF) && !f6i->nh && f6i->fib6_nh->fib_nh_gw_family; } void ip6_route_input(struct sk_buff *skb); struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, struct flowi6 *fl6, const struct sk_buff *skb, int flags); struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct flowi6 *fl6, int flags); static inline struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6) { return ip6_route_output_flags(net, sk, fl6, 0); } /* Only conditionally release dst if flags indicates * !RT6_LOOKUP_F_DST_NOREF or dst is in uncached_list. */ static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags) { if (!(flags & RT6_LOOKUP_F_DST_NOREF) || !list_empty(&rt->dst.rt_uncached)) ip6_rt_put(rt); } struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, const struct sk_buff *skb, int flags); struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int ifindex, struct flowi6 *fl6, const struct sk_buff *skb, int flags); void ip6_route_init_special_entries(void); int ip6_route_init(void); void ip6_route_cleanup(void); int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg); int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); int ip6_ins_rt(struct net *net, struct fib6_info *f6i); int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify); void rt6_flush_exceptions(struct fib6_info *f6i); void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, unsigned long now); static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, const struct in6_addr *daddr, unsigned int prefs, int l3mdev_index, struct in6_addr *saddr) { struct net_device *l3mdev; struct net_device *dev; bool same_vrf; int err = 0; rcu_read_lock(); l3mdev = dev_get_by_index_rcu(net, l3mdev_index); if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) dev = f6i ? fib6_info_nh_dev(f6i) : NULL; same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; if (f6i && f6i->fib6_prefsrc.plen && same_vrf) *saddr = f6i->fib6_prefsrc.addr; else err = ipv6_dev_get_saddr(net, same_vrf ? 
dev : l3mdev, daddr, prefs, saddr); rcu_read_unlock(); return err; } struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr, int oif, const struct sk_buff *skb, int flags); u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, const struct sk_buff *skb, struct flow_keys *hkeys); struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6); void fib6_force_start_gc(struct net *net); struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev, const struct in6_addr *addr, bool anycast, gfp_t gfp_flags, struct netlink_ext_ack *extack); struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, int flags); /* * support functions for ND * */ struct fib6_info *rt6_get_dflt_router(struct net *net, const struct in6_addr *addr, struct net_device *dev); struct fib6_info *rt6_add_dflt_router(struct net *net, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref, u32 defrtr_usr_metric, int lifetime); void rt6_purge_dflt_routers(struct net *net); int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, const struct in6_addr *gwaddr); void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif, u32 mark, kuid_t uid); void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu); void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, kuid_t uid); void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif); void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); struct netlink_callback; struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; struct net *net; struct fib_dump_filter filter; }; int rt6_dump_route(struct fib6_info *f6i, void *p_arg, unsigned int skip); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); void rt6_sync_up(struct net_device *dev, unsigned char nh_flags); void rt6_disable_ip(struct net_device *dev, unsigned long event); void rt6_sync_down_dev(struct net_device *dev, unsigned long event); void rt6_multipath_rebalance(struct fib6_info *f6i); void rt6_uncached_list_add(struct rt6_info *rt); void rt6_uncached_list_del(struct rt6_info *rt); static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) { const struct dst_entry *dst = skb_dst(skb); if (dst) return dst_rt6_info(dst); return NULL; } /* * Store a destination cache entry in a socket */ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, const struct in6_addr *daddr, const struct in6_addr *saddr) { struct ipv6_pinfo *np = inet6_sk(sk); np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst)); sk_setup_caps(sk, dst); np->daddr_cache = daddr; #ifdef CONFIG_IPV6_SUBTREES np->saddr_cache = saddr; #endif } void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst, const struct flowi6 *fl6); static inline bool ipv6_unicast_destination(const struct sk_buff *skb) { const struct rt6_info *rt = dst_rt6_info(skb_dst(skb)); return rt->rt6i_flags & RTF_LOCAL; } static inline bool ipv6_anycast_destination(const struct dst_entry *dst, const struct in6_addr *daddr) { const struct rt6_info *rt = dst_rt6_info(dst); return rt->rt6i_flags & RTF_ANYCAST || (rt->rt6i_dst.plen < 127 && !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) && ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int 
(*output)(struct net *, struct sock *, struct sk_buff *)); static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) { const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? inet6_sk(skb->sk) : NULL; const struct dst_entry *dst = skb_dst(skb); unsigned int mtu; if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) { mtu = READ_ONCE(dst->dev->mtu); mtu -= lwtunnel_headroom(dst->lwtstate, mtu); } else { mtu = dst_mtu(dst); } return mtu; } static inline bool ip6_sk_accept_pmtu(const struct sock *sk) { u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); return pmtudisc != IPV6_PMTUDISC_INTERFACE && pmtudisc != IPV6_PMTUDISC_OMIT; } static inline bool ip6_sk_ignore_df(const struct sock *sk) { u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); return pmtudisc < IPV6_PMTUDISC_DO || pmtudisc == IPV6_PMTUDISC_OMIT; } static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt, const struct in6_addr *daddr) { if (rt->rt6i_flags & RTF_GATEWAY) return &rt->rt6i_gateway; else if (unlikely(rt->rt6i_flags & RTF_CACHE)) return &rt->rt6i_dst.addr; else return daddr; } static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b) { struct fib6_nh *nha, *nhb; if (a->nh || b->nh) return nexthop_cmp(a->nh, b->nh); nha = a->fib6_nh; nhb = b->fib6_nh; return nha->fib_nh_dev == nhb->fib_nh_dev && ipv6_addr_equal(&nha->fib_nh_gw6, &nhb->fib_nh_gw6) && !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws); } static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { struct inet6_dev *idev; unsigned int mtu; if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) { mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) goto out; } mtu = IPV6_MIN_MTU; rcu_read_lock(); idev = __in6_dev_get(dst->dev); if (idev) mtu = READ_ONCE(idev->cnf.mtu6); rcu_read_unlock(); out: return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } u32 ip6_mtu_from_fib6(const struct fib6_result *res, const struct in6_addr *daddr, const struct in6_addr *saddr); struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw, struct net_device *dev, struct sk_buff *skb, const void *daddr); #endif
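The declarations above are typically consumed in a lookup/release pair: ip6_route_output() always returns a dst_entry (failures are reported through dst->error rather than a NULL pointer), and the caller drops the reference when it is done. A minimal sketch of such a caller, with a hypothetical function name:

/* Illustrative sketch only; a typical caller of the lookup helpers declared above. */
#include <net/dst.h>
#include <net/ip6_route.h>

static int example_ipv6_lookup(struct net *net, const struct in6_addr *daddr)
{
	struct flowi6 fl6 = { .daddr = *daddr };
	struct dst_entry *dst;
	int err = 0;

	dst = ip6_route_output(net, NULL, &fl6);	/* never NULL; failures live in dst->error */
	if (dst->error)
		err = dst->error;
	dst_release(dst);				/* drop the reference taken by the lookup */
	return err;
}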
// SPDX-License-Identifier: GPL-2.0+ /* * User-space Probes (UProbes) * * Copyright (C) IBM Corporation, 2008-2012 * Authors: * Srikar Dronamraju * Jim Keniston * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra */ #include <linux/kernel.h> #include <linux/highmem.h> #include <linux/pagemap.h> /* read_mapping_page */ #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/export.h> #include <linux/rmap.h> /* anon_vma_prepare */ #include <linux/mmu_notifier.h> #include <linux/swap.h> /* folio_free_swap */ #include <linux/ptrace.h> /* user_enable_single_step */ #include <linux/kdebug.h> /* notifier mechanism */ #include <linux/percpu-rwsem.h> #include <linux/task_work.h> #include <linux/shmem_fs.h> #include <linux/khugepaged.h> #include <linux/rcupdate_trace.h> #include <linux/workqueue.h> #include <linux/srcu.h> #include <linux/oom.h> /* check_stable_address_space */ #include <linux/uprobes.h> #define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES) #define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE static struct rb_root uprobes_tree = RB_ROOT; /* * allows us to skip the uprobe_mmap if there are no uprobe events active * at this time.
Probably a fine grained per inode count is better? */ #define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree) static DEFINE_RWLOCK(uprobes_treelock); /* serialize rbtree access */ static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock); #define UPROBES_HASH_SZ 13 /* serialize uprobe->pending_list */ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; #define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ]) DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem); /* Covers return_instance's uprobe lifetime. */ DEFINE_STATIC_SRCU(uretprobes_srcu); /* Have a copy of original instruction */ #define UPROBE_COPY_INSN 0 struct uprobe { struct rb_node rb_node; /* node in the rb tree */ refcount_t ref; struct rw_semaphore register_rwsem; struct rw_semaphore consumer_rwsem; struct list_head pending_list; struct list_head consumers; struct inode *inode; /* Also hold a ref to inode */ union { struct rcu_head rcu; struct work_struct work; }; loff_t offset; loff_t ref_ctr_offset; unsigned long flags; /* "unsigned long" so bitops work */ /* * The generic code assumes that it has two members of unknown type * owned by the arch-specific code: * * insn - copy_insn() saves the original instruction here for * arch_uprobe_analyze_insn(). * * ixol - potentially modified instruction to execute out of * line, copied to xol_area by xol_get_insn_slot(). */ struct arch_uprobe arch; }; struct delayed_uprobe { struct list_head list; struct uprobe *uprobe; struct mm_struct *mm; }; static DEFINE_MUTEX(delayed_uprobe_lock); static LIST_HEAD(delayed_uprobe_list); /* * Execute out of line area: anonymous executable mapping installed * by the probed task to execute the copy of the original instruction * mangled by set_swbp(). * * On a breakpoint hit, thread contests for a slot. It frees the * slot after singlestep. Currently a fixed number of slots are * allocated. */ struct xol_area { wait_queue_head_t wq; /* if all slots are busy */ unsigned long *bitmap; /* 0 = free slot */ struct page *page; /* * We keep the vma's vm_start rather than a pointer to the vma * itself. The probed process or a naughty kernel module could make * the vma go away, and we must handle that reasonably gracefully. */ unsigned long vaddr; /* Page(s) of instruction slots */ }; static void uprobe_warn(struct task_struct *t, const char *msg) { pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg); } /* * valid_vma: Verify if the specified vma is an executable vma * Relax restrictions while unregistering: vm_flags might have * changed after breakpoint was inserted. * - is_register: indicates if we are in register context. * - Return 1 if the specified virtual address is in an * executable vma. */ static bool valid_vma(struct vm_area_struct *vma, bool is_register) { vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE; if (is_register) flags |= VM_WRITE; return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; } static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) { return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); } static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) { return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); } /** * __replace_page - replace page in vma by new page. 
* based on replace_page in mm/ksm.c * * @vma: vma that holds the pte pointing to page * @addr: address the old @page is mapped at * @old_page: the page we are replacing by new_page * @new_page: the modified page we replace page by * * If @new_page is NULL, only unmap @old_page. * * Returns 0 on success, negative error code otherwise. */ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, struct page *old_page, struct page *new_page) { struct folio *old_folio = page_folio(old_page); struct folio *new_folio; struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); int err; struct mmu_notifier_range range; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); if (new_page) { new_folio = page_folio(new_page); err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL); if (err) return err; } /* For folio_free_swap() below */ folio_lock(old_folio); mmu_notifier_invalidate_range_start(&range); err = -EAGAIN; if (!page_vma_mapped_walk(&pvmw)) goto unlock; VM_BUG_ON_PAGE(addr != pvmw.address, old_page); if (new_page) { folio_get(new_folio); folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, vma); } else /* no new page, just dec_mm_counter for old_page */ dec_mm_counter(mm, MM_ANONPAGES); if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(old_folio)); inc_mm_counter(mm, MM_ANONPAGES); } flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) set_pte_at(mm, addr, pvmw.pte, mk_pte(new_page, vma->vm_page_prot)); folio_remove_rmap_pte(old_folio, old_page, vma); if (!folio_mapped(old_folio)) folio_free_swap(old_folio); page_vma_mapped_walk_done(&pvmw); folio_put(old_folio); err = 0; unlock: mmu_notifier_invalidate_range_end(&range); folio_unlock(old_folio); return err; } /** * is_swbp_insn - check if instruction is breakpoint instruction. * @insn: instruction to be checked. * Default implementation of is_swbp_insn * Returns true if @insn is a breakpoint instruction. */ bool __weak is_swbp_insn(uprobe_opcode_t *insn) { return *insn == UPROBE_SWBP_INSN; } /** * is_trap_insn - check if instruction is breakpoint instruction. * @insn: instruction to be checked. * Default implementation of is_trap_insn * Returns true if @insn is a breakpoint instruction. * * This function is needed for the case where an architecture has multiple * trap instructions (like powerpc). */ bool __weak is_trap_insn(uprobe_opcode_t *insn) { return is_swbp_insn(insn); } static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) { void *kaddr = kmap_atomic(page); memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); kunmap_atomic(kaddr); } static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) { void *kaddr = kmap_atomic(page); memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); kunmap_atomic(kaddr); } static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) { uprobe_opcode_t old_opcode; bool is_swbp; /* * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here. * We do not check if it is any other 'trap variant' which could * be conditional trap instruction such as the one powerpc supports. * * The logic is that we do not care if the underlying instruction * is a trap variant; uprobes always wins over any other (gdb) * breakpoint. 
*/ copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); is_swbp = is_swbp_insn(&old_opcode); if (is_swbp_insn(new_opcode)) { if (is_swbp) /* register: already installed? */ return 0; } else { if (!is_swbp) /* unregister: was it changed by us? */ return 0; } return 1; } static struct delayed_uprobe * delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm) { struct delayed_uprobe *du; list_for_each_entry(du, &delayed_uprobe_list, list) if (du->uprobe == uprobe && du->mm == mm) return du; return NULL; } static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) { struct delayed_uprobe *du; if (delayed_uprobe_check(uprobe, mm)) return 0; du = kzalloc(sizeof(*du), GFP_KERNEL); if (!du) return -ENOMEM; du->uprobe = uprobe; du->mm = mm; list_add(&du->list, &delayed_uprobe_list); return 0; } static void delayed_uprobe_delete(struct delayed_uprobe *du) { if (WARN_ON(!du)) return; list_del(&du->list); kfree(du); } static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm) { struct list_head *pos, *q; struct delayed_uprobe *du; if (!uprobe && !mm) return; list_for_each_safe(pos, q, &delayed_uprobe_list) { du = list_entry(pos, struct delayed_uprobe, list); if (uprobe && du->uprobe != uprobe) continue; if (mm && du->mm != mm) continue; delayed_uprobe_delete(du); } } static bool valid_ref_ctr_vma(struct uprobe *uprobe, struct vm_area_struct *vma) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); return uprobe->ref_ctr_offset && vma->vm_file && file_inode(vma->vm_file) == uprobe->inode && (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && vma->vm_start <= vaddr && vma->vm_end > vaddr; } static struct vm_area_struct * find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) { VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *tmp; for_each_vma(vmi, tmp) if (valid_ref_ctr_vma(uprobe, tmp)) return tmp; return NULL; } static int __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) { void *kaddr; struct page *page; int ret; short *ptr; if (!vaddr || !d) return -EINVAL; ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE, &page, NULL); if (unlikely(ret <= 0)) { /* * We are asking for 1 page. If get_user_pages_remote() fails, * it may return 0, in that case we have to return error. */ return ret == 0 ? -EBUSY : ret; } kaddr = kmap_atomic(page); ptr = kaddr + (vaddr & ~PAGE_MASK); if (unlikely(*ptr + d < 0)) { pr_warn("ref_ctr going negative. vaddr: 0x%lx, " "curr val: %d, delta: %d\n", vaddr, *ptr, d); ret = -EINVAL; goto out; } *ptr += d; ret = 0; out: kunmap_atomic(kaddr); put_page(page); return ret; } static void update_ref_ctr_warn(struct uprobe *uprobe, struct mm_struct *mm, short d) { pr_warn("ref_ctr %s failed for inode: 0x%lx offset: " "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n", d > 0 ? 
"increment" : "decrement", uprobe->inode->i_ino, (unsigned long long) uprobe->offset, (unsigned long long) uprobe->ref_ctr_offset, mm); } static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, short d) { struct vm_area_struct *rc_vma; unsigned long rc_vaddr; int ret = 0; rc_vma = find_ref_ctr_vma(uprobe, mm); if (rc_vma) { rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); ret = __update_ref_ctr(mm, rc_vaddr, d); if (ret) update_ref_ctr_warn(uprobe, mm, d); if (d > 0) return ret; } mutex_lock(&delayed_uprobe_lock); if (d > 0) ret = delayed_uprobe_add(uprobe, mm); else delayed_uprobe_remove(uprobe, mm); mutex_unlock(&delayed_uprobe_lock); return ret; } /* * NOTE: * Expect the breakpoint instruction to be the smallest size instruction for * the architecture. If an arch has variable length instruction and the * breakpoint instruction is not of the smallest length instruction * supported by that architecture then we need to modify is_trap_at_addr and * uprobe_write_opcode accordingly. This would never be a problem for archs * that have fixed length instructions. * * uprobe_write_opcode - write the opcode at a given virtual address. * @auprobe: arch specific probepoint information. * @mm: the probed process address space. * @vaddr: the virtual address to store the opcode. * @opcode: opcode to be written at @vaddr. * * Called with mm->mmap_lock held for read or write. * Return 0 (success) or a negative errno. */ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t opcode) { struct uprobe *uprobe; struct page *old_page, *new_page; struct vm_area_struct *vma; int ret, is_register, ref_ctr_updated = 0; bool orig_page_huge = false; unsigned int gup_flags = FOLL_FORCE; is_register = is_swbp_insn(&opcode); uprobe = container_of(auprobe, struct uprobe, arch); retry: if (is_register) gup_flags |= FOLL_SPLIT_PMD; /* Read the page with vaddr into memory */ old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma); if (IS_ERR(old_page)) return PTR_ERR(old_page); ret = verify_opcode(old_page, vaddr, &opcode); if (ret <= 0) goto put_old; if (WARN(!is_register && PageCompound(old_page), "uprobe unregister should never work on compound page\n")) { ret = -EINVAL; goto put_old; } /* We are going to replace instruction, update ref_ctr. */ if (!ref_ctr_updated && uprobe->ref_ctr_offset) { ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1); if (ret) goto put_old; ref_ctr_updated = 1; } ret = 0; if (!is_register && !PageAnon(old_page)) goto put_old; ret = anon_vma_prepare(vma); if (ret) goto put_old; ret = -ENOMEM; new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (!new_page) goto put_old; __SetPageUptodate(new_page); copy_highpage(new_page, old_page); copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); if (!is_register) { struct page *orig_page; pgoff_t index; VM_BUG_ON_PAGE(!PageAnon(old_page), old_page); index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT; orig_page = find_get_page(vma->vm_file->f_inode->i_mapping, index); if (orig_page) { if (PageUptodate(orig_page) && pages_identical(new_page, orig_page)) { /* let go new_page */ put_page(new_page); new_page = NULL; if (PageCompound(orig_page)) orig_page_huge = true; } put_page(orig_page); } } ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page); if (new_page) put_page(new_page); put_old: put_page(old_page); if (unlikely(ret == -EAGAIN)) goto retry; /* Revert back reference counter if instruction update failed. 
*/ if (ret && is_register && ref_ctr_updated) update_ref_ctr(uprobe, mm, -1); /* try collapse pmd for compound page */ if (!ret && orig_page_huge) collapse_pte_mapped_thp(mm, vaddr, false); return ret; } /** * set_swbp - store breakpoint at a given address. * @auprobe: arch specific probepoint information. * @mm: the probed process address space. * @vaddr: the virtual address to insert the opcode. * * For mm @mm, store the breakpoint instruction at @vaddr. * Return 0 (success) or a negative errno. */ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) { return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN); } /** * set_orig_insn - Restore the original instruction. * @mm: the probed process address space. * @auprobe: arch specific probepoint information. * @vaddr: the virtual address to insert the opcode. * * For mm @mm, restore the original opcode (opcode) at @vaddr. * Return 0 (success) or a negative errno. */ int __weak set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) { return uprobe_write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn); } /* uprobe should have guaranteed positive refcount */ static struct uprobe *get_uprobe(struct uprobe *uprobe) { refcount_inc(&uprobe->ref); return uprobe; } /* * uprobe should have guaranteed lifetime, which can be either of: * - caller already has refcount taken (and wants an extra one); * - uprobe is RCU protected and won't be freed until after grace period; * - we are holding uprobes_treelock (for read or write, doesn't matter). */ static struct uprobe *try_get_uprobe(struct uprobe *uprobe) { if (refcount_inc_not_zero(&uprobe->ref)) return uprobe; return NULL; } static inline bool uprobe_is_active(struct uprobe *uprobe) { return !RB_EMPTY_NODE(&uprobe->rb_node); } static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu) { struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); kfree(uprobe); } static void uprobe_free_srcu(struct rcu_head *rcu) { struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace); } static void uprobe_free_deferred(struct work_struct *work) { struct uprobe *uprobe = container_of(work, struct uprobe, work); write_lock(&uprobes_treelock); if (uprobe_is_active(uprobe)) { write_seqcount_begin(&uprobes_seqcount); rb_erase(&uprobe->rb_node, &uprobes_tree); write_seqcount_end(&uprobes_seqcount); } write_unlock(&uprobes_treelock); /* * If application munmap(exec_vma) before uprobe_unregister() * gets called, we don't get a chance to remove uprobe from * delayed_uprobe_list from remove_breakpoint(). Do it here. */ mutex_lock(&delayed_uprobe_lock); delayed_uprobe_remove(uprobe, NULL); mutex_unlock(&delayed_uprobe_lock); /* start srcu -> rcu_tasks_trace -> kfree chain */ call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu); } static void put_uprobe(struct uprobe *uprobe) { if (!refcount_dec_and_test(&uprobe->ref)) return; INIT_WORK(&uprobe->work, uprobe_free_deferred); schedule_work(&uprobe->work); } /* Initialize hprobe as SRCU-protected "leased" uprobe */ static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx) { WARN_ON(!uprobe); hprobe->state = HPROBE_LEASED; hprobe->uprobe = uprobe; hprobe->srcu_idx = srcu_idx; } /* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */ static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe) { hprobe->state = uprobe ? 
HPROBE_STABLE : HPROBE_GONE; hprobe->uprobe = uprobe; hprobe->srcu_idx = -1; } /* * hprobe_consume() fetches hprobe's underlying uprobe and detects whether * uprobe is SRCU protected or is refcounted. hprobe_consume() can be * used only once for a given hprobe. * * Caller has to call hprobe_finalize() and pass previous hprobe_state, so * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever * is appropriate. */ static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate) { *hstate = xchg(&hprobe->state, HPROBE_CONSUMED); switch (*hstate) { case HPROBE_LEASED: case HPROBE_STABLE: return hprobe->uprobe; case HPROBE_GONE: /* uprobe is NULL, no SRCU */ case HPROBE_CONSUMED: /* uprobe was finalized already, do nothing */ return NULL; default: WARN(1, "hprobe invalid state %d", *hstate); return NULL; } } /* * Reset hprobe state and, if hprobe was LEASED, release SRCU lock. * hprobe_finalize() can only be used from current context after * hprobe_consume() call (which determines uprobe and hstate value). */ static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate) { switch (hstate) { case HPROBE_LEASED: __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); break; case HPROBE_STABLE: put_uprobe(hprobe->uprobe); break; case HPROBE_GONE: case HPROBE_CONSUMED: break; default: WARN(1, "hprobe invalid state %d", hstate); break; } } /* * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED) * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of * them can win the race to perform SRCU unlocking. Whoever wins must perform * SRCU unlock. * * Returns underlying valid uprobe or NULL, if there was no underlying uprobe * to begin with or we failed to bump its refcount and it's going away. * * Returned non-NULL uprobe can be still safely used within an ongoing SRCU * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has * an extra refcount for caller to assume and use. Otherwise, it's not * guaranteed that returned uprobe has a positive refcount, so caller has to * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current * SRCU lock region. See dup_utask(). */ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get) { enum hprobe_state hstate; /* * return_instance's hprobe is protected by RCU. * Underlying uprobe is itself protected from reuse by SRCU. */ lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu)); hstate = READ_ONCE(hprobe->state); switch (hstate) { case HPROBE_STABLE: /* uprobe has positive refcount, bump refcount, if necessary */ return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe; case HPROBE_GONE: /* * SRCU was unlocked earlier and we didn't manage to take * uprobe refcnt, so it's effectively NULL */ return NULL; case HPROBE_CONSUMED: /* * uprobe was consumed, so it's effectively NULL as far as * uretprobe processing logic is concerned */ return NULL; case HPROBE_LEASED: { struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe); /* * Try to switch hprobe state, guarding against * hprobe_consume() or another hprobe_expire() racing with us. * Note, if we failed to get uprobe refcount, we use special * HPROBE_GONE state to signal that hprobe->uprobe shouldn't * be used as it will be freed after SRCU is unlocked. */ if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) { /* We won the race, we are the ones to unlock SRCU */ __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); return get ? 
get_uprobe(uprobe) : uprobe; } /* * We lost the race, undo refcount bump (if it ever happened), * unless caller would like an extra refcount anyway. */ if (uprobe && !get) put_uprobe(uprobe); /* * Even if hprobe_consume() or another hprobe_expire() wins * the state update race and unlocks SRCU from under us, we * still have a guarantee that underlying uprobe won't be * freed due to ongoing caller's SRCU lock region, so we can * return it regardless. Also, if `get` was true, we also have * an extra ref for the caller to own. This is used in dup_utask(). */ return uprobe; } default: WARN(1, "unknown hprobe state %d", hstate); return NULL; } } static __always_inline int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, const struct uprobe *r) { if (l_inode < r->inode) return -1; if (l_inode > r->inode) return 1; if (l_offset < r->offset) return -1; if (l_offset > r->offset) return 1; return 0; } #define __node_2_uprobe(node) \ rb_entry((node), struct uprobe, rb_node) struct __uprobe_key { struct inode *inode; loff_t offset; }; static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b) { const struct __uprobe_key *a = key; return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b)); } static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b) { struct uprobe *u = __node_2_uprobe(a); return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b)); } /* * Assumes being inside RCU protected region. * No refcount is taken on returned uprobe. */ static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset) { struct __uprobe_key key = { .inode = inode, .offset = offset, }; struct rb_node *node; unsigned int seq; lockdep_assert(rcu_read_lock_trace_held()); do { seq = read_seqcount_begin(&uprobes_seqcount); node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key); /* * Lockless RB-tree lookups can result only in false negatives. * If the element is found, it is correct and can be returned * under RCU protection. If we find nothing, we need to * validate that seqcount didn't change. If it did, we have to * try again as we might have missed the element (false * negative). If seqcount is unchanged, search truly failed. */ if (node) return __node_2_uprobe(node); } while (read_seqcount_retry(&uprobes_seqcount, seq)); return NULL; } /* * Attempt to insert a new uprobe into uprobes_tree. * * If uprobe already exists (for given inode+offset), we just increment * refcount of previously existing uprobe. * * If not, a provided new instance of uprobe is inserted into the tree (with * assumed initial refcount == 1). * * In any case, we return a uprobe instance that ends up being in uprobes_tree. * Caller has to clean up new uprobe instance, if it ended up not being * inserted into the tree. * * We assume that uprobes_treelock is held for writing. */ static struct uprobe *__insert_uprobe(struct uprobe *uprobe) { struct rb_node *node; again: node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp); if (node) { struct uprobe *u = __node_2_uprobe(node); if (!try_get_uprobe(u)) { rb_erase(node, &uprobes_tree); RB_CLEAR_NODE(&u->rb_node); goto again; } return u; } return uprobe; } /* * Acquire uprobes_treelock and insert uprobe into uprobes_tree * (or reuse existing one, see __insert_uprobe() comments above). 
*/ static struct uprobe *insert_uprobe(struct uprobe *uprobe) { struct uprobe *u; write_lock(&uprobes_treelock); write_seqcount_begin(&uprobes_seqcount); u = __insert_uprobe(uprobe); write_seqcount_end(&uprobes_seqcount); write_unlock(&uprobes_treelock); return u; } static void ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe) { pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx " "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n", uprobe->inode->i_ino, (unsigned long long) uprobe->offset, (unsigned long long) cur_uprobe->ref_ctr_offset, (unsigned long long) uprobe->ref_ctr_offset); } static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, loff_t ref_ctr_offset) { struct uprobe *uprobe, *cur_uprobe; uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); if (!uprobe) return ERR_PTR(-ENOMEM); uprobe->inode = inode; uprobe->offset = offset; uprobe->ref_ctr_offset = ref_ctr_offset; INIT_LIST_HEAD(&uprobe->consumers); init_rwsem(&uprobe->register_rwsem); init_rwsem(&uprobe->consumer_rwsem); RB_CLEAR_NODE(&uprobe->rb_node); refcount_set(&uprobe->ref, 1); /* add to uprobes_tree, sorted on inode:offset */ cur_uprobe = insert_uprobe(uprobe); /* a uprobe exists for this inode:offset combination */ if (cur_uprobe != uprobe) { if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) { ref_ctr_mismatch_warn(cur_uprobe, uprobe); put_uprobe(cur_uprobe); kfree(uprobe); return ERR_PTR(-EINVAL); } kfree(uprobe); uprobe = cur_uprobe; } return uprobe; } static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) { static atomic64_t id; down_write(&uprobe->consumer_rwsem); list_add_rcu(&uc->cons_node, &uprobe->consumers); uc->id = (__u64) atomic64_inc_return(&id); up_write(&uprobe->consumer_rwsem); } /* * For uprobe @uprobe, delete the consumer @uc. * Should never be called with consumer that's not part of @uprobe->consumers. */ static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) { down_write(&uprobe->consumer_rwsem); list_del_rcu(&uc->cons_node); up_write(&uprobe->consumer_rwsem); } static int __copy_insn(struct address_space *mapping, struct file *filp, void *insn, int nbytes, loff_t offset) { struct page *page; /* * Ensure that the page that has the original instruction is populated * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(), * see uprobe_register(). */ if (mapping->a_ops->read_folio) page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); else page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); if (IS_ERR(page)) return PTR_ERR(page); copy_from_page(page, offset, insn, nbytes); put_page(page); return 0; } static int copy_insn(struct uprobe *uprobe, struct file *filp) { struct address_space *mapping = uprobe->inode->i_mapping; loff_t offs = uprobe->offset; void *insn = &uprobe->arch.insn; int size = sizeof(uprobe->arch.insn); int len, err = -EIO; /* Copy only available bytes, -EIO if nothing was read */ do { if (offs >= i_size_read(uprobe->inode)) break; len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK)); err = __copy_insn(mapping, filp, insn, len, offs); if (err) break; insn += len; offs += len; size -= len; } while (size); return err; } static int prepare_uprobe(struct uprobe *uprobe, struct file *file, struct mm_struct *mm, unsigned long vaddr) { int ret = 0; if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) return ret; /* TODO: move this into _register, until then we abuse this sem. 
*/ down_write(&uprobe->consumer_rwsem); if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) goto out; ret = copy_insn(uprobe, file); if (ret) goto out; ret = -ENOTSUPP; if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) goto out; ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); if (ret) goto out; smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ set_bit(UPROBE_COPY_INSN, &uprobe->flags); out: up_write(&uprobe->consumer_rwsem); return ret; } static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm) { return !uc->filter || uc->filter(uc, mm); } static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm) { struct uprobe_consumer *uc; bool ret = false; down_read(&uprobe->consumer_rwsem); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { ret = consumer_filter(uc, mm); if (ret) break; } up_read(&uprobe->consumer_rwsem); return ret; } static int install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr) { bool first_uprobe; int ret; ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); if (ret) return ret; /* * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), * the task can hit this breakpoint right after __replace_page(). */ first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); if (first_uprobe) set_bit(MMF_HAS_UPROBES, &mm->flags); ret = set_swbp(&uprobe->arch, mm, vaddr); if (!ret) clear_bit(MMF_RECALC_UPROBES, &mm->flags); else if (first_uprobe) clear_bit(MMF_HAS_UPROBES, &mm->flags); return ret; } static int remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) { set_bit(MMF_RECALC_UPROBES, &mm->flags); return set_orig_insn(&uprobe->arch, mm, vaddr); } struct map_info { struct map_info *next; struct mm_struct *mm; unsigned long vaddr; }; static inline struct map_info *free_map_info(struct map_info *info) { struct map_info *next = info->next; kfree(info); return next; } static struct map_info * build_map_info(struct address_space *mapping, loff_t offset, bool is_register) { unsigned long pgoff = offset >> PAGE_SHIFT; struct vm_area_struct *vma; struct map_info *curr = NULL; struct map_info *prev = NULL; struct map_info *info; int more = 0; again: i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { if (!valid_vma(vma, is_register)) continue; if (!prev && !more) { /* * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through * reclaim. This is optimistic, no harm done if it fails. 
*/ prev = kmalloc(sizeof(struct map_info), GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); if (prev) prev->next = NULL; } if (!prev) { more++; continue; } if (!mmget_not_zero(vma->vm_mm)) continue; info = prev; prev = prev->next; info->next = curr; curr = info; info->mm = vma->vm_mm; info->vaddr = offset_to_vaddr(vma, offset); } i_mmap_unlock_read(mapping); if (!more) goto out; prev = curr; while (curr) { mmput(curr->mm); curr = curr->next; } do { info = kmalloc(sizeof(struct map_info), GFP_KERNEL); if (!info) { curr = ERR_PTR(-ENOMEM); goto out; } info->next = prev; prev = info; } while (--more); goto again; out: while (prev) prev = free_map_info(prev); return curr; } static int register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) { bool is_register = !!new; struct map_info *info; int err = 0; percpu_down_write(&dup_mmap_sem); info = build_map_info(uprobe->inode->i_mapping, uprobe->offset, is_register); if (IS_ERR(info)) { err = PTR_ERR(info); goto out; } while (info) { struct mm_struct *mm = info->mm; struct vm_area_struct *vma; if (err && is_register) goto free; /* * We take mmap_lock for writing to avoid the race with * find_active_uprobe_rcu() which takes mmap_lock for reading. * Thus this install_breakpoint() can not make * is_trap_at_addr() true right after find_uprobe_rcu() * returns NULL in find_active_uprobe_rcu(). */ mmap_write_lock(mm); if (check_stable_address_space(mm)) goto unlock; vma = find_vma(mm, info->vaddr); if (!vma || !valid_vma(vma, is_register) || file_inode(vma->vm_file) != uprobe->inode) goto unlock; if (vma->vm_start > info->vaddr || vaddr_to_offset(vma, info->vaddr) != uprobe->offset) goto unlock; if (is_register) { /* consult only the "caller", new consumer. */ if (consumer_filter(new, mm)) err = install_breakpoint(uprobe, mm, vma, info->vaddr); } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { if (!filter_chain(uprobe, mm)) err |= remove_breakpoint(uprobe, mm, info->vaddr); } unlock: mmap_write_unlock(mm); free: mmput(mm); info = free_map_info(info); } out: percpu_up_write(&dup_mmap_sem); return err; } /** * uprobe_unregister_nosync - unregister an already registered probe. * @uprobe: uprobe to remove * @uc: identify which probe if multiple probes are colocated. */ void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc) { int err; down_write(&uprobe->register_rwsem); consumer_del(uprobe, uc); err = register_for_each_vma(uprobe, NULL); up_write(&uprobe->register_rwsem); /* TODO : cant unregister? schedule a worker thread */ if (unlikely(err)) { uprobe_warn(current, "unregister, leaking uprobe"); return; } put_uprobe(uprobe); } EXPORT_SYMBOL_GPL(uprobe_unregister_nosync); void uprobe_unregister_sync(void) { /* * Now that handler_chain() and handle_uretprobe_chain() iterate over * uprobe->consumers list under RCU protection without holding * uprobe->register_rwsem, we need to wait for RCU grace period to * make sure that we can't call into just unregistered * uprobe_consumer's callbacks anymore. If we don't do that, fast and * unlucky enough caller can free consumer's memory and cause * handler_chain() or handle_uretprobe_chain() to do an use-after-free. */ synchronize_rcu_tasks_trace(); synchronize_srcu(&uretprobes_srcu); } EXPORT_SYMBOL_GPL(uprobe_unregister_sync); /** * uprobe_register - register a probe * @inode: the file in which the probe has to be placed. * @offset: offset from the start of the file. * @ref_ctr_offset: offset of SDT marker / reference counter * @uc: information on howto handle the probe.. 
* * Apart from the access refcount, uprobe_register() takes a creation * refcount (thro alloc_uprobe) if and only if this @uprobe is getting * inserted into the rbtree (i.e first consumer for a @inode:@offset * tuple). Creation refcount stops uprobe_unregister from freeing the * @uprobe even before the register operation is complete. Creation * refcount is released when the last @uc for the @uprobe * unregisters. Caller of uprobe_register() is required to keep @inode * (and the containing mount) referenced. * * Return: pointer to the new uprobe on success or an ERR_PTR on failure. */ struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) { struct uprobe *uprobe; int ret; /* Uprobe must have at least one set consumer */ if (!uc->handler && !uc->ret_handler) return ERR_PTR(-EINVAL); /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ if (!inode->i_mapping->a_ops->read_folio && !shmem_mapping(inode->i_mapping)) return ERR_PTR(-EIO); /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) return ERR_PTR(-EINVAL); /* * This ensures that copy_from_page(), copy_to_page() and * __update_ref_ctr() can't cross page boundary. */ if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) return ERR_PTR(-EINVAL); if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) return ERR_PTR(-EINVAL); uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); if (IS_ERR(uprobe)) return uprobe; down_write(&uprobe->register_rwsem); consumer_add(uprobe, uc); ret = register_for_each_vma(uprobe, uc); up_write(&uprobe->register_rwsem); if (ret) { uprobe_unregister_nosync(uprobe, uc); /* * Registration might have partially succeeded, so we can have * this consumer being called right at this time. We need to * sync here. It's ok, it's unlikely slow path. */ uprobe_unregister_sync(); return ERR_PTR(ret); } return uprobe; } EXPORT_SYMBOL_GPL(uprobe_register); /** * uprobe_apply - add or remove the breakpoints according to @uc->filter * @uprobe: uprobe which "owns" the breakpoint * @uc: consumer which wants to add more or remove some breakpoints * @add: add or remove the breakpoints * Return: 0 on success or negative error code. */ int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add) { struct uprobe_consumer *con; int ret = -ENOENT; down_write(&uprobe->register_rwsem); rcu_read_lock_trace(); list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { if (con == uc) { ret = register_for_each_vma(uprobe, add ? 
uc : NULL); break; } } rcu_read_unlock_trace(); up_write(&uprobe->register_rwsem); return ret; } static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) { VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; int err = 0; mmap_read_lock(mm); for_each_vma(vmi, vma) { unsigned long vaddr; loff_t offset; if (!valid_vma(vma, false) || file_inode(vma->vm_file) != uprobe->inode) continue; offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; if (uprobe->offset < offset || uprobe->offset >= offset + vma->vm_end - vma->vm_start) continue; vaddr = offset_to_vaddr(vma, uprobe->offset); err |= remove_breakpoint(uprobe, mm, vaddr); } mmap_read_unlock(mm); return err; } static struct rb_node * find_node_in_range(struct inode *inode, loff_t min, loff_t max) { struct rb_node *n = uprobes_tree.rb_node; while (n) { struct uprobe *u = rb_entry(n, struct uprobe, rb_node); if (inode < u->inode) { n = n->rb_left; } else if (inode > u->inode) { n = n->rb_right; } else { if (max < u->offset) n = n->rb_left; else if (min > u->offset) n = n->rb_right; else break; } } return n; } /* * For a given range in vma, build a list of probes that need to be inserted. */ static void build_probe_list(struct inode *inode, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *head) { loff_t min, max; struct rb_node *n, *t; struct uprobe *u; INIT_LIST_HEAD(head); min = vaddr_to_offset(vma, start); max = min + (end - start) - 1; read_lock(&uprobes_treelock); n = find_node_in_range(inode, min, max); if (n) { for (t = n; t; t = rb_prev(t)) { u = rb_entry(t, struct uprobe, rb_node); if (u->inode != inode || u->offset < min) break; /* if uprobe went away, it's safe to ignore it */ if (try_get_uprobe(u)) list_add(&u->pending_list, head); } for (t = n; (t = rb_next(t)); ) { u = rb_entry(t, struct uprobe, rb_node); if (u->inode != inode || u->offset > max) break; /* if uprobe went away, it's safe to ignore it */ if (try_get_uprobe(u)) list_add(&u->pending_list, head); } } read_unlock(&uprobes_treelock); } /* @vma contains reference counter, not the probed instruction. */ static int delayed_ref_ctr_inc(struct vm_area_struct *vma) { struct list_head *pos, *q; struct delayed_uprobe *du; unsigned long vaddr; int ret = 0, err = 0; mutex_lock(&delayed_uprobe_lock); list_for_each_safe(pos, q, &delayed_uprobe_list) { du = list_entry(pos, struct delayed_uprobe, list); if (du->mm != vma->vm_mm || !valid_ref_ctr_vma(du->uprobe, vma)) continue; vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); if (ret) { update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); if (!err) err = ret; } delayed_uprobe_delete(du); } mutex_unlock(&delayed_uprobe_lock); return err; } /* * Called from mmap_region/vma_merge with mm->mmap_lock acquired. * * Currently we ignore all errors and always return 0, the callers * can't handle the failure anyway. */ int uprobe_mmap(struct vm_area_struct *vma) { struct list_head tmp_list; struct uprobe *uprobe, *u; struct inode *inode; if (no_uprobe_events()) return 0; if (vma->vm_file && (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) delayed_ref_ctr_inc(vma); if (!valid_vma(vma, true)) return 0; inode = file_inode(vma->vm_file); if (!inode) return 0; mutex_lock(uprobes_mmap_hash(inode)); build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); /* * We can race with uprobe_unregister(), this uprobe can be already * removed. 
But in this case filter_chain() must return false, all * consumers have gone away. */ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { if (!fatal_signal_pending(current) && filter_chain(uprobe, vma->vm_mm)) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); } put_uprobe(uprobe); } mutex_unlock(uprobes_mmap_hash(inode)); return 0; } static bool vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) { loff_t min, max; struct inode *inode; struct rb_node *n; inode = file_inode(vma->vm_file); min = vaddr_to_offset(vma, start); max = min + (end - start) - 1; read_lock(&uprobes_treelock); n = find_node_in_range(inode, min, max); read_unlock(&uprobes_treelock); return !!n; } /* * Called in context of a munmap of a vma. */ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (no_uprobe_events() || !valid_vma(vma, false)) return; if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ return; if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) return; if (vma_has_uprobes(vma, start, end)) set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); } static vm_fault_t xol_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { struct xol_area *area = vma->vm_mm->uprobes_state.xol_area; vmf->page = area->page; get_page(vmf->page); return 0; } static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { return -EPERM; } static const struct vm_special_mapping xol_mapping = { .name = "[uprobes]", .fault = xol_fault, .mremap = xol_mremap, }; /* Slot allocation for XOL */ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) { struct vm_area_struct *vma; int ret; if (mmap_write_lock_killable(mm)) return -EINTR; if (mm->uprobes_state.xol_area) { ret = -EALREADY; goto fail; } if (!area->vaddr) { /* Try to map as high as possible, this is only a hint. */ area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(area->vaddr)) { ret = area->vaddr; goto fail; } } vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &xol_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto fail; } ret = 0; /* pairs with get_xol_area() */ smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ fail: mmap_write_unlock(mm); return ret; } void * __weak arch_uprobe_trampoline(unsigned long *psize) { static uprobe_opcode_t insn = UPROBE_SWBP_INSN; *psize = UPROBE_SWBP_INSN_SIZE; return &insn; } static struct xol_area *__create_xol_area(unsigned long vaddr) { struct mm_struct *mm = current->mm; unsigned long insns_size; struct xol_area *area; void *insns; area = kzalloc(sizeof(*area), GFP_KERNEL); if (unlikely(!area)) goto out; area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), GFP_KERNEL); if (!area->bitmap) goto free_area; area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); if (!area->page) goto free_bitmap; area->vaddr = vaddr; init_waitqueue_head(&area->wq); /* Reserve the 1st slot for get_trampoline_vaddr() */ set_bit(0, area->bitmap); insns = arch_uprobe_trampoline(&insns_size); arch_uprobe_copy_ixol(area->page, 0, insns, insns_size); if (!xol_add_vma(mm, area)) return area; __free_page(area->page); free_bitmap: kfree(area->bitmap); free_area: kfree(area); out: return NULL; } /* * get_xol_area - Allocate process's xol_area if necessary. 
* This area will be used for storing instructions for execution out of line. * * Returns the allocated area or NULL. */ static struct xol_area *get_xol_area(void) { struct mm_struct *mm = current->mm; struct xol_area *area; if (!mm->uprobes_state.xol_area) __create_xol_area(0); /* Pairs with xol_add_vma() smp_store_release() */ area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ return area; } /* * uprobe_clear_state - Free the area allocated for slots. */ void uprobe_clear_state(struct mm_struct *mm) { struct xol_area *area = mm->uprobes_state.xol_area; mutex_lock(&delayed_uprobe_lock); delayed_uprobe_remove(NULL, mm); mutex_unlock(&delayed_uprobe_lock); if (!area) return; put_page(area->page); kfree(area->bitmap); kfree(area); } void uprobe_start_dup_mmap(void) { percpu_down_read(&dup_mmap_sem); } void uprobe_end_dup_mmap(void) { percpu_up_read(&dup_mmap_sem); } void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) { if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { set_bit(MMF_HAS_UPROBES, &newmm->flags); /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ set_bit(MMF_RECALC_UPROBES, &newmm->flags); } } static unsigned long xol_get_slot_nr(struct xol_area *area) { unsigned long slot_nr; slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); if (slot_nr < UINSNS_PER_PAGE) { if (!test_and_set_bit(slot_nr, area->bitmap)) return slot_nr; } return UINSNS_PER_PAGE; } /* * xol_get_insn_slot - allocate a slot for xol. */ static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask) { struct xol_area *area = get_xol_area(); unsigned long slot_nr; if (!area) return false; wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE); utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES; arch_uprobe_copy_ixol(area->page, utask->xol_vaddr, &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); return true; } /* * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot() */ static void xol_free_insn_slot(struct uprobe_task *utask) { struct xol_area *area = current->mm->uprobes_state.xol_area; unsigned long offset = utask->xol_vaddr - area->vaddr; unsigned int slot_nr; utask->xol_vaddr = 0; /* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */ if (WARN_ON_ONCE(offset >= PAGE_SIZE)) return; slot_nr = offset / UPROBE_XOL_SLOT_BYTES; clear_bit(slot_nr, area->bitmap); smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ if (waitqueue_active(&area->wq)) wake_up(&area->wq); } void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { /* Initialize the slot */ copy_to_page(page, vaddr, src, len); /* * We probably need flush_icache_user_page() but it needs vma. * This should work on most of architectures by default. If * architecture needs to do something different it can define * its own version of the function. */ flush_dcache_page(page); } /** * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs * @regs: Reflects the saved state of the task after it has hit a breakpoint * instruction. * Return the address of the breakpoint instruction. 
*/ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) { return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; } unsigned long uprobe_get_trap_addr(struct pt_regs *regs) { struct uprobe_task *utask = current->utask; if (unlikely(utask && utask->active_uprobe)) return utask->vaddr; return instruction_pointer(regs); } static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri) { ri->cons_cnt = 0; ri->next = utask->ri_pool; utask->ri_pool = ri; } static struct return_instance *ri_pool_pop(struct uprobe_task *utask) { struct return_instance *ri = utask->ri_pool; if (likely(ri)) utask->ri_pool = ri->next; return ri; } static void ri_free(struct return_instance *ri) { kfree(ri->extra_consumers); kfree_rcu(ri, rcu); } static void free_ret_instance(struct uprobe_task *utask, struct return_instance *ri, bool cleanup_hprobe) { unsigned seq; if (cleanup_hprobe) { enum hprobe_state hstate; (void)hprobe_consume(&ri->hprobe, &hstate); hprobe_finalize(&ri->hprobe, hstate); } /* * At this point return_instance is unlinked from utask's * return_instances list and this has become visible to ri_timer(). * If seqcount now indicates that ri_timer's return instance * processing loop isn't active, we can return ri into the pool of * to-be-reused return instances for future uretprobes. If ri_timer() * happens to be running right now, though, we fallback to safety and * just perform RCU-delated freeing of ri. */ if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) { /* immediate reuse of ri without RCU GP is OK */ ri_pool_push(utask, ri); } else { /* we might be racing with ri_timer(), so play it safe */ ri_free(ri); } } /* * Called with no locks held. * Called in context of an exiting or an exec-ing thread. */ void uprobe_free_utask(struct task_struct *t) { struct uprobe_task *utask = t->utask; struct return_instance *ri, *ri_next; if (!utask) return; t->utask = NULL; WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr); timer_delete_sync(&utask->ri_timer); ri = utask->return_instances; while (ri) { ri_next = ri->next; free_ret_instance(utask, ri, true /* cleanup_hprobe */); ri = ri_next; } /* free_ret_instance() above might add to ri_pool, so this loop should come last */ ri = utask->ri_pool; while (ri) { ri_next = ri->next; ri_free(ri); ri = ri_next; } kfree(utask); } #define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */ #define for_each_ret_instance_rcu(pos, head) \ for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next)) static void ri_timer(struct timer_list *timer) { struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer); struct return_instance *ri; /* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */ guard(srcu)(&uretprobes_srcu); /* RCU protects return_instance from freeing. */ guard(rcu)(); write_seqcount_begin(&utask->ri_seqcount); for_each_ret_instance_rcu(ri, utask->return_instances) hprobe_expire(&ri->hprobe, false); write_seqcount_end(&utask->ri_seqcount); } static struct uprobe_task *alloc_utask(void) { struct uprobe_task *utask; utask = kzalloc(sizeof(*utask), GFP_KERNEL); if (!utask) return NULL; timer_setup(&utask->ri_timer, ri_timer, 0); seqcount_init(&utask->ri_seqcount); return utask; } /* * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. 
* * Returns: * - pointer to new uprobe_task on success * - NULL otherwise */ static struct uprobe_task *get_utask(void) { if (!current->utask) current->utask = alloc_utask(); return current->utask; } static struct return_instance *alloc_return_instance(struct uprobe_task *utask) { struct return_instance *ri; ri = ri_pool_pop(utask); if (ri) return ri; ri = kzalloc(sizeof(*ri), GFP_KERNEL); if (!ri) return ZERO_SIZE_PTR; return ri; } static struct return_instance *dup_return_instance(struct return_instance *old) { struct return_instance *ri; ri = kmemdup(old, sizeof(*ri), GFP_KERNEL); if (!ri) return NULL; if (unlikely(old->cons_cnt > 1)) { ri->extra_consumers = kmemdup(old->extra_consumers, sizeof(ri->extra_consumers[0]) * (old->cons_cnt - 1), GFP_KERNEL); if (!ri->extra_consumers) { kfree(ri); return NULL; } } return ri; } static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) { struct uprobe_task *n_utask; struct return_instance **p, *o, *n; struct uprobe *uprobe; n_utask = alloc_utask(); if (!n_utask) return -ENOMEM; t->utask = n_utask; /* protect uprobes from freeing, we'll need try_get_uprobe() them */ guard(srcu)(&uretprobes_srcu); p = &n_utask->return_instances; for (o = o_utask->return_instances; o; o = o->next) { n = dup_return_instance(o); if (!n) return -ENOMEM; /* if uprobe is non-NULL, we'll have an extra refcount for uprobe */ uprobe = hprobe_expire(&o->hprobe, true); /* * New utask will have stable properly refcounted uprobe or * NULL. Even if we failed to get refcounted uprobe, we still * need to preserve full set of return_instances for proper * uretprobe handling and nesting in forked task. */ hprobe_init_stable(&n->hprobe, uprobe); n->next = NULL; rcu_assign_pointer(*p, n); p = &n->next; n_utask->depth++; } return 0; } static void dup_xol_work(struct callback_head *work) { if (current->flags & PF_EXITING) return; if (!__create_xol_area(current->utask->dup_xol_addr) && !fatal_signal_pending(current)) uprobe_warn(current, "dup xol area"); } /* * Called in context of a new clone/fork from copy_process. */ void uprobe_copy_process(struct task_struct *t, unsigned long flags) { struct uprobe_task *utask = current->utask; struct mm_struct *mm = current->mm; struct xol_area *area; t->utask = NULL; if (!utask || !utask->return_instances) return; if (mm == t->mm && !(flags & CLONE_VFORK)) return; if (dup_utask(t, utask)) return uprobe_warn(t, "dup ret instances"); /* The task can fork() after dup_xol_work() fails */ area = mm->uprobes_state.xol_area; if (!area) return uprobe_warn(t, "dup xol area"); if (mm == t->mm) return; t->utask->dup_xol_addr = area->vaddr; init_task_work(&t->utask->dup_xol_work, dup_xol_work); task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); } /* * Current area->vaddr notion assume the trampoline address is always * equal area->vaddr. * * Returns -1 in case the xol_area is not allocated. */ unsigned long uprobe_get_trampoline_vaddr(void) { struct xol_area *area; unsigned long trampoline_vaddr = -1; /* Pairs with xol_add_vma() smp_store_release() */ area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ if (area) trampoline_vaddr = area->vaddr; return trampoline_vaddr; } static void cleanup_return_instances(struct uprobe_task *utask, bool chained, struct pt_regs *regs) { struct return_instance *ri = utask->return_instances, *ri_next; enum rp_check ctx = chained ? 
RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { ri_next = ri->next; rcu_assign_pointer(utask->return_instances, ri_next); utask->depth--; free_ret_instance(utask, ri, true /* cleanup_hprobe */); ri = ri_next; } } static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, struct return_instance *ri) { struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; bool chained; int srcu_idx; if (!get_xol_area()) goto free; if (utask->depth >= MAX_URETPROBE_DEPTH) { printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" " nestedness limit pid/tgid=%d/%d\n", current->pid, current->tgid); goto free; } trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) goto free; /* drop the entries invalidated by longjmp() */ chained = (orig_ret_vaddr == trampoline_vaddr); cleanup_return_instances(utask, chained, regs); /* * We don't want to keep trampoline address in stack, rather keep the * original return address of first caller thru all the consequent * instances. This also makes breakpoint unwrapping easier. */ if (chained) { if (!utask->return_instances) { /* * This situation is not possible. Likely we have an * attack from user-space. */ uprobe_warn(current, "handle tail call"); goto free; } orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; } /* __srcu_read_lock() because SRCU lock survives switch to user space */ srcu_idx = __srcu_read_lock(&uretprobes_srcu); ri->func = instruction_pointer(regs); ri->stack = user_stack_pointer(regs); ri->orig_ret_vaddr = orig_ret_vaddr; ri->chained = chained; utask->depth++; hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx); ri->next = utask->return_instances; rcu_assign_pointer(utask->return_instances, ri); mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD); return; free: ri_free(ri); } /* Prepare to single-step probed instruction out of line. */ static int pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) { struct uprobe_task *utask = current->utask; int err; if (!try_get_uprobe(uprobe)) return -EINVAL; if (!xol_get_insn_slot(uprobe, utask)) { err = -ENOMEM; goto err_out; } utask->vaddr = bp_vaddr; err = arch_uprobe_pre_xol(&uprobe->arch, regs); if (unlikely(err)) { xol_free_insn_slot(utask); goto err_out; } utask->active_uprobe = uprobe; utask->state = UTASK_SSTEP; return 0; err_out: put_uprobe(uprobe); return err; } /* * If we are singlestepping, then ensure this thread is not connected to * non-fatal signals until completion of singlestep. When xol insn itself * triggers the signal, restart the original insn even if the task is * already SIGKILL'ed (since coredump should report the correct ip). This * is even more important if the task has a handler for SIGSEGV/etc, The * _same_ instruction should be repeated again after return from the signal * handler, and SSTEP can never finish in this case. 
*/ bool uprobe_deny_signal(void) { struct task_struct *t = current; struct uprobe_task *utask = t->utask; if (likely(!utask || !utask->active_uprobe)) return false; WARN_ON_ONCE(utask->state != UTASK_SSTEP); if (task_sigpending(t)) { spin_lock_irq(&t->sighand->siglock); clear_tsk_thread_flag(t, TIF_SIGPENDING); spin_unlock_irq(&t->sighand->siglock); if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { utask->state = UTASK_SSTEP_TRAPPED; set_tsk_thread_flag(t, TIF_UPROBE); } } return true; } static void mmf_recalc_uprobes(struct mm_struct *mm) { VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; for_each_vma(vmi, vma) { if (!valid_vma(vma, false)) continue; /* * This is not strictly accurate, we can race with * uprobe_unregister() and see the already removed * uprobe if delete_uprobe() was not yet called. * Or this uprobe can be filtered out. */ if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) return; } clear_bit(MMF_HAS_UPROBES, &mm->flags); } static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) { struct page *page; uprobe_opcode_t opcode; int result; if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) return -EINVAL; pagefault_disable(); result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); pagefault_enable(); if (likely(result == 0)) goto out; result = get_user_pages(vaddr, 1, FOLL_FORCE, &page); if (result < 0) return result; copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); put_page(page); out: /* This needs to return true for any variant of the trap insn */ return is_trap_insn(&opcode); } static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr) { struct mm_struct *mm = current->mm; struct uprobe *uprobe = NULL; struct vm_area_struct *vma; struct file *vm_file; loff_t offset; unsigned int seq; guard(rcu)(); if (!mmap_lock_speculate_try_begin(mm, &seq)) return NULL; vma = vma_lookup(mm, bp_vaddr); if (!vma) return NULL; /* * vm_file memory can be reused for another instance of struct file, * but can't be freed from under us, so it's safe to read fields from * it, even if the values are some garbage values; ultimately * find_uprobe_rcu() + mmap_lock_speculation_end() check will ensure * that whatever we speculatively found is correct */ vm_file = READ_ONCE(vma->vm_file); if (!vm_file) return NULL; offset = (loff_t)(vma->vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vma->vm_start); uprobe = find_uprobe_rcu(vm_file->f_inode, offset); if (!uprobe) return NULL; /* now double check that nothing about MM changed */ if (mmap_lock_speculate_retry(mm, seq)) return NULL; return uprobe; } /* assumes being inside RCU protected region */ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp) { struct mm_struct *mm = current->mm; struct uprobe *uprobe = NULL; struct vm_area_struct *vma; uprobe = find_active_uprobe_speculative(bp_vaddr); if (uprobe) return uprobe; mmap_read_lock(mm); vma = vma_lookup(mm, bp_vaddr); if (vma) { if (vma->vm_file) { struct inode *inode = file_inode(vma->vm_file); loff_t offset = vaddr_to_offset(vma, bp_vaddr); uprobe = find_uprobe_rcu(inode, offset); } if (!uprobe) *is_swbp = is_trap_at_addr(mm, bp_vaddr); } else { *is_swbp = -EFAULT; } if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) mmf_recalc_uprobes(mm); mmap_read_unlock(mm); return uprobe; } static struct return_instance *push_consumer(struct return_instance *ri, __u64 id, __u64 cookie) { struct return_consumer *ric; if (unlikely(ri == ZERO_SIZE_PTR)) return ri; if (unlikely(ri->cons_cnt > 0)) { 
ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL); if (!ric) { ri_free(ri); return ZERO_SIZE_PTR; } ri->extra_consumers = ric; } ric = likely(ri->cons_cnt == 0) ? &ri->consumer : &ri->extra_consumers[ri->cons_cnt - 1]; ric->id = id; ric->cookie = cookie; ri->cons_cnt++; return ri; } static struct return_consumer * return_consumer_find(struct return_instance *ri, int *iter, int id) { struct return_consumer *ric; int idx; for (idx = *iter; idx < ri->cons_cnt; idx++) { ric = likely(idx == 0) ? &ri->consumer : &ri->extra_consumers[idx - 1]; if (ric->id == id) { *iter = idx + 1; return ric; } } return NULL; } static bool ignore_ret_handler(int rc) { return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE; } static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) { struct uprobe_consumer *uc; bool has_consumers = false, remove = true; struct return_instance *ri = NULL; struct uprobe_task *utask = current->utask; utask->auprobe = &uprobe->arch; list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { bool session = uc->handler && uc->ret_handler; __u64 cookie = 0; int rc = 0; if (uc->handler) { rc = uc->handler(uc, regs, &cookie); WARN(rc < 0 || rc > 2, "bad rc=0x%x from %ps()\n", rc, uc->handler); } remove &= rc == UPROBE_HANDLER_REMOVE; has_consumers = true; if (!uc->ret_handler || ignore_ret_handler(rc)) continue; if (!ri) ri = alloc_return_instance(utask); if (session) ri = push_consumer(ri, uc->id, cookie); } utask->auprobe = NULL; if (!ZERO_OR_NULL_PTR(ri)) prepare_uretprobe(uprobe, regs, ri); if (remove && has_consumers) { down_read(&uprobe->register_rwsem); /* re-check that removal is still required, this time under lock */ if (!filter_chain(uprobe, current->mm)) { WARN_ON(!uprobe_is_active(uprobe)); unapply_uprobe(uprobe, current->mm); } up_read(&uprobe->register_rwsem); } } static void handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs) { struct return_consumer *ric; struct uprobe_consumer *uc; int ric_idx = 0; /* all consumers unsubscribed meanwhile */ if (unlikely(!uprobe)) return; rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { bool session = uc->handler && uc->ret_handler; if (uc->ret_handler) { ric = return_consumer_find(ri, &ric_idx, uc->id); if (!session || ric) uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL); } } rcu_read_unlock_trace(); } static struct return_instance *find_next_ret_chain(struct return_instance *ri) { bool chained; do { chained = ri->chained; ri = ri->next; /* can't be NULL if chained */ } while (chained); return ri; } void uprobe_handle_trampoline(struct pt_regs *regs) { struct uprobe_task *utask; struct return_instance *ri, *ri_next, *next_chain; struct uprobe *uprobe; enum hprobe_state hstate; bool valid; utask = current->utask; if (!utask) goto sigill; ri = utask->return_instances; if (!ri) goto sigill; do { /* * We should throw out the frames invalidated by longjmp(). * If this chain is valid, then the next one should be alive * or NULL; the latter case means that nobody but ri->func * could hit this trampoline on return. TODO: sigaltstack(). 
*/ next_chain = find_next_ret_chain(ri); valid = !next_chain || arch_uretprobe_is_alive(next_chain, RP_CHECK_RET, regs); instruction_pointer_set(regs, ri->orig_ret_vaddr); do { /* pop current instance from the stack of pending return instances, * as it's not pending anymore: we just fixed up original * instruction pointer in regs and are about to call handlers; * this allows fixup_uretprobe_trampoline_entries() to properly fix up * captured stack traces from uretprobe handlers, in which pending * trampoline addresses on the stack are replaced with correct * original return addresses */ ri_next = ri->next; rcu_assign_pointer(utask->return_instances, ri_next); utask->depth--; uprobe = hprobe_consume(&ri->hprobe, &hstate); if (valid) handle_uretprobe_chain(ri, uprobe, regs); hprobe_finalize(&ri->hprobe, hstate); /* We already took care of hprobe, no need to waste more time on that. */ free_ret_instance(utask, ri, false /* !cleanup_hprobe */); ri = ri_next; } while (ri != next_chain); } while (!valid); return; sigill: uprobe_warn(current, "handle uretprobe, sending SIGILL."); force_sig(SIGILL); } bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) { return false; } bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs) { return true; } /* * Run handler and ask thread to singlestep. * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. */ static void handle_swbp(struct pt_regs *regs) { struct uprobe *uprobe; unsigned long bp_vaddr; int is_swbp; bp_vaddr = uprobe_get_swbp_addr(regs); if (bp_vaddr == uprobe_get_trampoline_vaddr()) return uprobe_handle_trampoline(regs); rcu_read_lock_trace(); uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. */ force_sig(SIGTRAP); } else { /* * Either we raced with uprobe_unregister() or we can't * access this memory. The latter is only possible if * another thread plays with our ->mm. In both cases * we can simply restart. If this vma was unmapped we * can pretend this insn was not executed yet and get * the (correct) SIGSEGV after restart. */ instruction_pointer_set(regs, bp_vaddr); } goto out; } /* change it in advance for ->handler() and restart */ instruction_pointer_set(regs, bp_vaddr); /* * TODO: move copy_insn/etc into _register and remove this hack. * After we hit the bp, _unregister + _register can install the * new and not-yet-analyzed uprobe at the same address, restart. */ if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) goto out; /* * Pairs with the smp_wmb() in prepare_uprobe(). * * Guarantees that if we see the UPROBE_COPY_INSN bit set, then * we must also see the stores to &uprobe->arch performed by the * prepare_uprobe() call. */ smp_rmb(); /* Tracing handlers use ->utask to communicate with fetch methods */ if (!get_utask()) goto out; if (arch_uprobe_ignore(&uprobe->arch, regs)) goto out; handler_chain(uprobe, regs); if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) goto out; if (pre_ssout(uprobe, regs, bp_vaddr)) goto out; out: /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ rcu_read_unlock_trace(); } /* * Perform required fix-ups and disable singlestep. * Allow pending signals to take effect. 
*/ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) { struct uprobe *uprobe; int err = 0; uprobe = utask->active_uprobe; if (utask->state == UTASK_SSTEP_ACK) err = arch_uprobe_post_xol(&uprobe->arch, regs); else if (utask->state == UTASK_SSTEP_TRAPPED) arch_uprobe_abort_xol(&uprobe->arch, regs); else WARN_ON_ONCE(1); put_uprobe(uprobe); utask->active_uprobe = NULL; utask->state = UTASK_RUNNING; xol_free_insn_slot(utask); spin_lock_irq(&current->sighand->siglock); recalc_sigpending(); /* see uprobe_deny_signal() */ spin_unlock_irq(&current->sighand->siglock); if (unlikely(err)) { uprobe_warn(current, "execute the probed insn, sending SIGILL."); force_sig(SIGILL); } } /* * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and * allows the thread to return from interrupt. After that handle_swbp() * sets utask->active_uprobe. * * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag * and allows the thread to return from interrupt. * * While returning to userspace, thread notices the TIF_UPROBE flag and calls * uprobe_notify_resume(). */ void uprobe_notify_resume(struct pt_regs *regs) { struct uprobe_task *utask; clear_thread_flag(TIF_UPROBE); utask = current->utask; if (utask && utask->active_uprobe) handle_singlestep(utask, regs); else handle_swbp(regs); } /* * uprobe_pre_sstep_notifier gets called from interrupt context as part of * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit. */ int uprobe_pre_sstep_notifier(struct pt_regs *regs) { if (!current->mm) return 0; if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) && (!current->utask || !current->utask->return_instances)) return 0; set_thread_flag(TIF_UPROBE); return 1; } /* * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep. */ int uprobe_post_sstep_notifier(struct pt_regs *regs) { struct uprobe_task *utask = current->utask; if (!current->mm || !utask || !utask->active_uprobe) /* task is currently not uprobed */ return 0; utask->state = UTASK_SSTEP_ACK; set_thread_flag(TIF_UPROBE); return 1; } static struct notifier_block uprobe_exception_nb = { .notifier_call = arch_uprobe_exception_notify, .priority = INT_MAX-1, /* notified after kprobes, kgdb */ }; void __init uprobes_init(void) { int i; for (i = 0; i < UPROBES_HASH_SZ; i++) mutex_init(&uprobes_mmap_mutex[i]); BUG_ON(register_die_notifier(&uprobe_exception_nb)); }
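/*
 * Illustrative sketch, not part of the original uprobes.c: a minimal,
 * hypothetical in-kernel consumer of the uprobe_register() /
 * uprobe_unregister_nosync() / uprobe_unregister_sync() API documented
 * above. The names example_handler/example_consumer/example_uprobe and
 * the attach/detach wrappers are invented purely for illustration.
 */
#include <linux/err.h>
#include <linux/uprobes.h>

static int example_handler(struct uprobe_consumer *uc, struct pt_regs *regs,
			   __u64 *cookie)
{
	/* Runs on every breakpoint hit; returning 0 keeps the probe armed. */
	return 0;
}

static struct uprobe_consumer example_consumer = {
	.handler = example_handler,
};

static struct uprobe *example_uprobe;

static int example_attach(struct inode *inode, loff_t offset)
{
	/* Caller must keep @inode (and its mount) referenced, see uprobe_register(). */
	example_uprobe = uprobe_register(inode, offset, 0, &example_consumer);
	return PTR_ERR_OR_ZERO(example_uprobe);
}

static void example_detach(void)
{
	uprobe_unregister_nosync(example_uprobe, &example_consumer);
	/* Wait out the RCU/SRCU grace periods before freeing consumer state. */
	uprobe_unregister_sync();
}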
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_MSR_H #define _ASM_X86_MSR_H #include "msr-index.h" #ifndef __ASSEMBLY__ #include <asm/asm.h> #include <asm/errno.h> #include <asm/cpumask.h> #include <uapi/asm/msr.h> #include <asm/shared/msr.h> #include <linux/percpu.h> struct msr_info { u32 msr_no; struct msr reg; struct msr __percpu *msrs; int err; }; struct msr_regs_info { u32 *regs; int err; }; struct saved_msr { bool valid; struct msr_info info; }; struct saved_msrs { unsigned int num; struct saved_msr *array; }; /* * both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A" * constraint has different meanings. For i386, "A" means exactly * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead, * it means rax *or* rdx. */ #ifdef CONFIG_X86_64 /* Using 64-bit values saves one instruction clearing the high half of low */ #define DECLARE_ARGS(val, low, high) unsigned long low, high #define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32) #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) #else #define DECLARE_ARGS(val, low, high) unsigned long long val #define EAX_EDX_VAL(val, low, high) (val) #define EAX_EDX_RET(val, low, high) "=A" (val) #endif /* * Be very careful with includes. This header is prone to include loops. 
*/ #include <asm/atomic.h> #include <linux/tracepoint-defs.h> #ifdef CONFIG_TRACEPOINTS DECLARE_TRACEPOINT(read_msr); DECLARE_TRACEPOINT(write_msr); DECLARE_TRACEPOINT(rdpmc); extern void do_trace_write_msr(unsigned int msr, u64 val, int failed); extern void do_trace_read_msr(unsigned int msr, u64 val, int failed); extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed); #else static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {} static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} #endif /* * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR * accessors and should not have any tracing or other functionality piggybacking * on them - those are *purely* for accessing MSRs and nothing more. So don't even * think of extending them - you will be slapped with a stinking trout or a frozen * shark will reach you, wherever you are! You've been warned. */ static __always_inline unsigned long long __rdmsr(unsigned int msr) { DECLARE_ARGS(val, low, high); asm volatile("1: rdmsr\n" "2:\n" _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR) : EAX_EDX_RET(val, low, high) : "c" (msr)); return EAX_EDX_VAL(val, low, high); } static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high) { asm volatile("1: wrmsr\n" "2:\n" _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR) : : "c" (msr), "a"(low), "d" (high) : "memory"); } #define native_rdmsr(msr, val1, val2) \ do { \ u64 __val = __rdmsr((msr)); \ (void)((val1) = (u32)__val); \ (void)((val2) = (u32)(__val >> 32)); \ } while (0) #define native_wrmsr(msr, low, high) \ __wrmsr(msr, low, high) #define native_wrmsrl(msr, val) \ __wrmsr((msr), (u32)((u64)(val)), \ (u32)((u64)(val) >> 32)) static inline unsigned long long native_read_msr(unsigned int msr) { unsigned long long val; val = __rdmsr(msr); if (tracepoint_enabled(read_msr)) do_trace_read_msr(msr, val, 0); return val; } static inline unsigned long long native_read_msr_safe(unsigned int msr, int *err) { DECLARE_ARGS(val, low, high); asm volatile("1: rdmsr ; xor %[err],%[err]\n" "2:\n\t" _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err]) : [err] "=r" (*err), EAX_EDX_RET(val, low, high) : "c" (msr)); if (tracepoint_enabled(read_msr)) do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err); return EAX_EDX_VAL(val, low, high); } /* Can be uninlined because referenced by paravirt */ static inline void notrace native_write_msr(unsigned int msr, u32 low, u32 high) { __wrmsr(msr, low, high); if (tracepoint_enabled(write_msr)) do_trace_write_msr(msr, ((u64)high << 32 | low), 0); } /* Can be uninlined because referenced by paravirt */ static inline int notrace native_write_msr_safe(unsigned int msr, u32 low, u32 high) { int err; asm volatile("1: wrmsr ; xor %[err],%[err]\n" "2:\n\t" _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err]) : [err] "=a" (err) : "c" (msr), "0" (low), "d" (high) : "memory"); if (tracepoint_enabled(write_msr)) do_trace_write_msr(msr, ((u64)high << 32 | low), err); return err; } extern int rdmsr_safe_regs(u32 regs[8]); extern int wrmsr_safe_regs(u32 regs[8]); /** * rdtsc() - returns the current TSC without ordering constraints * * rdtsc() returns the result of RDTSC as a 64-bit integer. The * only ordering constraint it supplies is the ordering implied by * "asm volatile": it will put the RDTSC in the place you expect. 
The * CPU can and will speculatively execute that RDTSC, though, so the * results can be non-monotonic if compared on different CPUs. */ static __always_inline unsigned long long rdtsc(void) { DECLARE_ARGS(val, low, high); asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); return EAX_EDX_VAL(val, low, high); } /** * rdtsc_ordered() - read the current TSC in program order * * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer. * It is ordered like a load to a global in-memory counter. It should * be impossible to observe non-monotonic rdtsc_unordered() behavior * across multiple CPUs as long as the TSC is synced. */ static __always_inline unsigned long long rdtsc_ordered(void) { DECLARE_ARGS(val, low, high); /* * The RDTSC instruction is not ordered relative to memory * access. The Intel SDM and the AMD APM are both vague on this * point, but empirically an RDTSC instruction can be * speculatively executed before prior loads. An RDTSC * immediately after an appropriate barrier appears to be * ordered as a normal load, that is, it provides the same * ordering guarantees as reading from a global memory location * that some other imaginary CPU is updating continuously with a * time stamp. * * Thus, use the preferred barrier on the respective CPU, aiming for * RDTSCP as the default. */ asm volatile(ALTERNATIVE_2("rdtsc", "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC, "rdtscp", X86_FEATURE_RDTSCP) : EAX_EDX_RET(val, low, high) /* RDTSCP clobbers ECX with MSR_TSC_AUX. */ :: "ecx"); return EAX_EDX_VAL(val, low, high); } static inline unsigned long long native_read_pmc(int counter) { DECLARE_ARGS(val, low, high); asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); if (tracepoint_enabled(rdpmc)) do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0); return EAX_EDX_VAL(val, low, high); } #ifdef CONFIG_PARAVIRT_XXL #include <asm/paravirt.h> #else #include <linux/errno.h> /* * Access to machine-specific registers (available on 586 and better only) * Note: the rd* operations modify the parameters directly (without using * pointer indirection), this allows gcc to optimize better */ #define rdmsr(msr, low, high) \ do { \ u64 __val = native_read_msr((msr)); \ (void)((low) = (u32)__val); \ (void)((high) = (u32)(__val >> 32)); \ } while (0) static inline void wrmsr(unsigned int msr, u32 low, u32 high) { native_write_msr(msr, low, high); } #define rdmsrl(msr, val) \ ((val) = native_read_msr((msr))) static inline void wrmsrl(unsigned int msr, u64 val) { native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32)); } /* wrmsr with exception handling */ static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high) { return native_write_msr_safe(msr, low, high); } /* rdmsr with exception handling */ #define rdmsr_safe(msr, low, high) \ ({ \ int __err; \ u64 __val = native_read_msr_safe((msr), &__err); \ (*low) = (u32)__val; \ (*high) = (u32)(__val >> 32); \ __err; \ }) static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p) { int err; *p = native_read_msr_safe(msr, &err); return err; } #define rdpmc(counter, low, high) \ do { \ u64 _l = native_read_pmc((counter)); \ (low) = (u32)_l; \ (high) = (u32)(_l >> 32); \ } while (0) #define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) #endif /* !CONFIG_PARAVIRT_XXL */ /* Instruction opcode for WRMSRNS supported in binutils >= 2.40 */ #define WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6) /* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. 
*/ static __always_inline void wrmsrns(u32 msr, u64 val) { /* * WRMSR is 2 bytes. WRMSRNS is 3 bytes. Pad WRMSR with a redundant * DS prefix to avoid a trailing NOP. */ asm volatile("1: " ALTERNATIVE("ds wrmsr", WRMSRNS, X86_FEATURE_WRMSRNS) "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR) : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))); } /* * 64-bit version of wrmsr_safe(): */ static inline int wrmsrl_safe(u32 msr, u64 val) { return wrmsr_safe(msr, (u32)val, (u32)(val >> 32)); } struct msr __percpu *msrs_alloc(void); void msrs_free(struct msr __percpu *msrs); int msr_set_bit(u32 msr, u8 bit); int msr_clear_bit(u32 msr, u8 bit); #ifdef CONFIG_SMP int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q); void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs); void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q); int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); #else /* CONFIG_SMP */ static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) { rdmsr(msr_no, *l, *h); return 0; } static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { wrmsr(msr_no, l, h); return 0; } static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { rdmsrl(msr_no, *q); return 0; } static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) { wrmsrl(msr_no, q); return 0; } static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, struct msr __percpu *msrs) { rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h)); } static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no, struct msr __percpu *msrs) { wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h)); } static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) { return rdmsr_safe(msr_no, l, h); } static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { return wrmsr_safe(msr_no, l, h); } static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { return rdmsrl_safe(msr_no, q); } static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) { return wrmsrl_safe(msr_no, q); } static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) { return rdmsr_safe_regs(regs); } static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) { return wrmsr_safe_regs(regs); } #endif /* CONFIG_SMP */ #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_MSR_H */
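The rdmsrl()/wrmsrl() wrappers above hide the EDX:EAX register split, and the _safe variants turn a #GP fault on an unimplemented MSR into an error return instead of an oops. A minimal usage sketch follows; EXAMPLE_MSR and example_msr_update() are hypothetical names invented for this illustration and are not part of the header above.

#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/msr.h>

/* Hypothetical MSR index, used only for this sketch. */
#define EXAMPLE_MSR	0x00000123

static void example_msr_update(void)
{
	u64 val;

	/* The _safe read reports an unimplemented MSR instead of faulting. */
	if (rdmsrl_safe(EXAMPLE_MSR, &val)) {
		pr_warn("MSR %#x is not implemented on this CPU\n", EXAMPLE_MSR);
		return;
	}

	/* wrmsrl() splits the 64-bit value into low/high halves for WRMSR. */
	wrmsrl(EXAMPLE_MSR, val | BIT_ULL(0));
}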
// SPDX-License-Identifier: GPL-2.0 /* * Out-of-line refcount functions. */ #include <linux/mutex.h> #include <linux/refcount.h> #include <linux/spinlock.h> #include <linux/bug.h> #define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n") void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t) { refcount_set(r, REFCOUNT_SATURATED); switch (t) { case REFCOUNT_ADD_NOT_ZERO_OVF: REFCOUNT_WARN("saturated; leaking memory"); break; case REFCOUNT_ADD_OVF: REFCOUNT_WARN("saturated; leaking memory"); break; case REFCOUNT_ADD_UAF: REFCOUNT_WARN("addition on 0; use-after-free"); break; case REFCOUNT_SUB_UAF: REFCOUNT_WARN("underflow; use-after-free"); break; case REFCOUNT_DEC_LEAK: REFCOUNT_WARN("decrement hit 0; leaking memory"); break; default: REFCOUNT_WARN("unknown saturation event!?"); } } EXPORT_SYMBOL(refcount_warn_saturate); /** * refcount_dec_if_one - decrement a refcount if it is 1 * @r: the refcount * * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the * success thereof. * * Like all decrement operations, it provides release memory order and provides * a control dependency. * * It can be used like a try-delete operator; this explicit case is provided * and not cmpxchg in generic, because that would allow implementing unsafe * operations. * * Return: true if the resulting refcount is 0, false otherwise */ bool refcount_dec_if_one(refcount_t *r) { int val = 1; return atomic_try_cmpxchg_release(&r->refs, &val, 0); } EXPORT_SYMBOL(refcount_dec_if_one); /** * refcount_dec_not_one - decrement a refcount if it is not 1 * @r: the refcount * * No atomic_t counterpart, it decrements unless the value is 1, in which case * it will return false. * * Was often done like: atomic_add_unless(&var, -1, 1) * * Return: true if the decrement operation was successful, false otherwise */ bool refcount_dec_not_one(refcount_t *r) { unsigned int new, val = atomic_read(&r->refs); do { if (unlikely(val == REFCOUNT_SATURATED)) return true; if (val == 1) return false; new = val - 1; if (new > val) { WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); return true; } } while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); return true; } EXPORT_SYMBOL(refcount_dec_not_one); /** * refcount_dec_and_mutex_lock - return holding mutex if able to decrement * refcount to 0 * @r: the refcount * @lock: the mutex to be locked * * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail * to decrement when saturated at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top.
* * Return: true and hold mutex if able to decrement refcount to 0, false * otherwise */ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) { if (refcount_dec_not_one(r)) return false; mutex_lock(lock); if (!refcount_dec_and_test(r)) { mutex_unlock(lock); return false; } return true; } EXPORT_SYMBOL(refcount_dec_and_mutex_lock); /** * refcount_dec_and_lock - return holding spinlock if able to decrement * refcount to 0 * @r: the refcount * @lock: the spinlock to be locked * * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to * decrement when saturated at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. * * Return: true and hold spinlock if able to decrement refcount to 0, false * otherwise */ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) { if (refcount_dec_not_one(r)) return false; spin_lock(lock); if (!refcount_dec_and_test(r)) { spin_unlock(lock); return false; } return true; } EXPORT_SYMBOL(refcount_dec_and_lock); /** * refcount_dec_and_lock_irqsave - return holding spinlock with disabled * interrupts if able to decrement refcount to 0 * @r: the refcount * @lock: the spinlock to be locked * @flags: saved IRQ-flags if the is acquired * * Same as refcount_dec_and_lock() above except that the spinlock is acquired * with disabled interrupts. * * Return: true and hold spinlock if able to decrement refcount to 0, false * otherwise */ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, unsigned long *flags) { if (refcount_dec_not_one(r)) return false; spin_lock_irqsave(lock, *flags); if (!refcount_dec_and_test(r)) { spin_unlock_irqrestore(lock, *flags); return false; } return true; } EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
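The three dec-and-lock helpers above share one idea: refcount_dec_not_one() handles every put that cannot be the final one without touching the lock, and only a potential last put takes the lock before the final refcount_dec_and_test(). A minimal usage sketch of that pattern follows; struct my_obj, my_obj_lock and my_obj_put() are hypothetical names invented for this example, not part of the refcount code above.

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object kept on a global list. */
struct my_obj {
	refcount_t ref;
	struct list_head node;
};

static LIST_HEAD(my_obj_list);
static DEFINE_SPINLOCK(my_obj_lock);

static void my_obj_put(struct my_obj *obj)
{
	/*
	 * Returns true only when this put dropped the count to zero; in
	 * that case the lock is already held, so the object can be
	 * unlinked and freed without racing against concurrent lookups.
	 */
	if (refcount_dec_and_lock(&obj->ref, &my_obj_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_obj_lock);
		kfree(obj);
	}
}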
// SPDX-License-Identifier: GPL-2.0+ /* * MCT (Magic Control Technology Corp.) USB RS232 Converter Driver * * Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch) * * This program is largely derived from the Belkin USB Serial Adapter Driver * (see belkin_sa.[ch]). All of the information about the device was acquired * by using SniffUSB on Windows98. For technical details see mct_u232.h. * * William G. Greathouse and Greg Kroah-Hartman provided great help on how to * do the reverse engineering and how to write a USB serial device driver.
* * TO BE DONE, TO BE CHECKED: * DTR/RTS signal handling may be incomplete or incorrect. I have mainly * implemented what I have seen with SniffUSB or found in belkin_sa.c. * For further TODOs check also belkin_sa.c. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/unaligned.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include "mct_u232.h" #define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>" #define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver" /* * Function prototypes */ static int mct_u232_port_probe(struct usb_serial_port *port); static void mct_u232_port_remove(struct usb_serial_port *remove); static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); static void mct_u232_close(struct usb_serial_port *port); static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); static void mct_u232_read_int_callback(struct urb *urb); static void mct_u232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int mct_u232_break_ctl(struct tty_struct *tty, int break_state); static int mct_u232_tiocmget(struct tty_struct *tty); static int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void mct_u232_throttle(struct tty_struct *tty); static void mct_u232_unthrottle(struct tty_struct *tty); /* * All of the device info needed for the MCT USB-RS232 converter. */ static const struct usb_device_id id_table[] = { { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) }, { USB_DEVICE(MCT_U232_BELKIN_F5U109_VID, MCT_U232_BELKIN_F5U109_PID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver mct_u232_device = { .driver = { .name = "mct_u232", }, .description = "MCT U232", .id_table = id_table, .num_ports = 1, .open = mct_u232_open, .close = mct_u232_close, .dtr_rts = mct_u232_dtr_rts, .throttle = mct_u232_throttle, .unthrottle = mct_u232_unthrottle, .read_int_callback = mct_u232_read_int_callback, .set_termios = mct_u232_set_termios, .break_ctl = mct_u232_break_ctl, .tiocmget = mct_u232_tiocmget, .tiocmset = mct_u232_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .port_probe = mct_u232_port_probe, .port_remove = mct_u232_port_remove, .get_icount = usb_serial_generic_get_icount, }; static struct usb_serial_driver * const serial_drivers[] = { &mct_u232_device, NULL }; struct mct_u232_private { struct urb *read_urb; spinlock_t lock; unsigned int control_state; /* Modem Line Setting (TIOCM) */ unsigned char last_lcr; /* Line Control Register */ unsigned char last_lsr; /* Line Status Register */ unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ }; #define THROTTLED 0x01 /* * Handle vendor specific USB requests */ #define WDR_TIMEOUT 5000 /* default urb timeout */ /* * Later day 2.6.0-test kernels have new baud rates like B230400 which * we do not know how to support. We ignore them for the moment. 
*/ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value, speed_t *result) { *result = value; if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID || le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_BELKIN_F5U109_PID) { switch (value) { case 300: return 0x01; case 600: return 0x02; /* this one not tested */ case 1200: return 0x03; case 2400: return 0x04; case 4800: return 0x06; case 9600: return 0x08; case 19200: return 0x09; case 38400: return 0x0a; case 57600: return 0x0b; case 115200: return 0x0c; default: *result = 9600; return 0x08; } } else { /* FIXME: Can we use any divider - should we do divider = 115200/value; real baud = 115200/divider */ switch (value) { case 300: break; case 600: break; case 1200: break; case 2400: break; case 4800: break; case 9600: break; case 19200: break; case 38400: break; case 57600: break; case 115200: break; default: value = 9600; *result = 9600; } return 115200/value; } } static int mct_u232_set_baud_rate(struct tty_struct *tty, struct usb_serial *serial, struct usb_serial_port *port, speed_t value) { unsigned int divisor; int rc; unsigned char *buf; unsigned char cts_enable_byte = 0; speed_t speed; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; divisor = mct_u232_calculate_baud_rate(serial, value, &speed); put_unaligned_le32(divisor, buf); rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_BAUD_RATE_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE, WDR_TIMEOUT); if (rc < 0) /*FIXME: What value speed results */ dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n", value, rc); else tty_encode_baud_rate(tty, speed, speed); dev_dbg(&port->dev, "set_baud_rate: value: 0x%x, divisor: 0x%x\n", value, divisor); /* Mimic the MCT-supplied Windows driver (version 1.21P.0104), which always sends two extra USB 'device request' messages after the 'baud rate change' message. The actual functionality of the request codes in these messages is not fully understood but these particular codes are never seen in any operation besides a baud rate change. Both of these messages send a single byte of data. In the first message, the value of this byte is always zero. The second message has been determined experimentally to control whether data will be transmitted to a device which is not asserting the 'CTS' signal. If the second message's data byte is zero, data will be transmitted even if 'CTS' is not asserted (i.e. no hardware flow control). if the second message's data byte is nonzero (a value of 1 is used by this driver), data will not be transmitted to a device which is not asserting 'CTS'. 
*/ buf[0] = 0; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_UNKNOWN1_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST, rc); if (port && C_CRTSCTS(tty)) cts_enable_byte = 1; dev_dbg(&port->dev, "set_baud_rate: send second control message, data = %02X\n", cts_enable_byte); buf[0] = cts_enable_byte; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_CTS_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_CTS_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc); kfree(buf); return rc; } /* mct_u232_set_baud_rate */ static int mct_u232_set_line_ctrl(struct usb_serial_port *port, unsigned char lcr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[0] = lcr; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), MCT_U232_SET_LINE_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc); dev_dbg(&port->dev, "set_line_ctrl: 0x%x\n", lcr); kfree(buf); return rc; } /* mct_u232_set_line_ctrl */ static int mct_u232_set_modem_ctrl(struct usb_serial_port *port, unsigned int control_state) { int rc; unsigned char mcr; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; mcr = MCT_U232_MCR_NONE; if (control_state & TIOCM_DTR) mcr |= MCT_U232_MCR_DTR; if (control_state & TIOCM_RTS) mcr |= MCT_U232_MCR_RTS; buf[0] = mcr; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), MCT_U232_SET_MODEM_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, WDR_TIMEOUT); kfree(buf); dev_dbg(&port->dev, "set_modem_ctrl: state=0x%x ==> mcr=0x%x\n", control_state, mcr); if (rc < 0) { dev_err(&port->dev, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); return rc; } return 0; } /* mct_u232_set_modem_ctrl */ static int mct_u232_get_modem_stat(struct usb_serial_port *port, unsigned char *msr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) { *msr = 0; return -ENOMEM; } rc = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), MCT_U232_GET_MODEM_STAT_REQUEST, MCT_U232_GET_REQUEST_TYPE, 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE, WDR_TIMEOUT); if (rc < MCT_U232_GET_MODEM_STAT_SIZE) { dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc); if (rc >= 0) rc = -EIO; *msr = 0; } else { *msr = buf[0]; } dev_dbg(&port->dev, "get_modem_stat: 0x%x\n", *msr); kfree(buf); return rc; } /* mct_u232_get_modem_stat */ static void mct_u232_msr_to_icount(struct async_icount *icount, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DDSR) icount->dsr++; if (msr & MCT_U232_MSR_DCTS) icount->cts++; if (msr & MCT_U232_MSR_DRI) icount->rng++; if (msr & MCT_U232_MSR_DCD) icount->dcd++; } /* mct_u232_msr_to_icount */ static void mct_u232_msr_to_state(struct usb_serial_port *port, unsigned int *control_state, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DSR) *control_state |= TIOCM_DSR; else *control_state &= ~TIOCM_DSR; if (msr & MCT_U232_MSR_CTS) *control_state |= TIOCM_CTS; else 
*control_state &= ~TIOCM_CTS; if (msr & MCT_U232_MSR_RI) *control_state |= TIOCM_RI; else *control_state &= ~TIOCM_RI; if (msr & MCT_U232_MSR_CD) *control_state |= TIOCM_CD; else *control_state &= ~TIOCM_CD; dev_dbg(&port->dev, "msr_to_state: msr=0x%x ==> state=0x%x\n", msr, *control_state); } /* mct_u232_msr_to_state */ /* * Driver's tty interface functions */ static int mct_u232_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv; /* check first to simplify error handling */ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) { dev_err(&port->dev, "expected endpoint missing\n"); return -ENODEV; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Use second interrupt-in endpoint for reading. */ priv->read_urb = serial->port[1]->interrupt_in_urb; priv->read_urb->context = port; spin_lock_init(&priv->lock); usb_set_serial_port_data(port, priv); return 0; } static void mct_u232_port_remove(struct usb_serial_port *port) { struct mct_u232_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); } static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); int retval = 0; unsigned int control_state; unsigned long flags; unsigned char last_lcr; unsigned char last_msr; /* Compensate for a hardware bug: although the Sitecom U232-P25 * device reports a maximum output packet size of 32 bytes, * it seems to be able to accept only 16 bytes (and that's what * SniffUSB says too...) */ if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID) port->bulk_out_size = 16; /* Do a defined restart: the normal serial device seems to * always turn on DTR and RTS here, so do the same. I'm not * sure if this is really necessary. But it should not harm * either. 
*/ spin_lock_irqsave(&priv->lock, flags); if (tty && C_BAUD(tty)) priv->control_state = TIOCM_DTR | TIOCM_RTS; else priv->control_state = 0; priv->last_lcr = (MCT_U232_DATA_BITS_8 | MCT_U232_PARITY_NONE | MCT_U232_STOP_BITS_1); control_state = priv->control_state; last_lcr = priv->last_lcr; spin_unlock_irqrestore(&priv->lock, flags); mct_u232_set_modem_ctrl(port, control_state); mct_u232_set_line_ctrl(port, last_lcr); /* Read modem status and update control state */ mct_u232_get_modem_stat(port, &last_msr); spin_lock_irqsave(&priv->lock, flags); priv->last_msr = last_msr; mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr); spin_unlock_irqrestore(&priv->lock, flags); retval = usb_submit_urb(priv->read_urb, GFP_KERNEL); if (retval) { dev_err(&port->dev, "usb_submit_urb(read) failed pipe 0x%x err %d\n", port->read_urb->pipe, retval); goto error; } retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (retval) { usb_kill_urb(priv->read_urb); dev_err(&port->dev, "usb_submit_urb(read int) failed pipe 0x%x err %d", port->interrupt_in_urb->pipe, retval); goto error; } return 0; error: return retval; } /* mct_u232_open */ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on) { unsigned int control_state; struct mct_u232_private *priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); if (on) priv->control_state |= TIOCM_DTR | TIOCM_RTS; else priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } static void mct_u232_close(struct usb_serial_port *port) { struct mct_u232_private *priv = usb_get_serial_port_data(port); usb_kill_urb(priv->read_urb); usb_kill_urb(port->interrupt_in_urb); usb_serial_generic_close(port); } /* mct_u232_close */ static void mct_u232_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; int retval; int status = urb->status; unsigned long flags; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); /* * Work-a-round: handle the 'usual' bulk-in pipe here */ if (urb->transfer_buffer_length > 2) { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, urb->actual_length); tty_flip_buffer_push(&port->port); } goto exit; } /* * The interrupt-in pipe signals exceptional conditions (modem line * signal changes and errors). data[0] holds MSR, data[1] holds LSR. */ spin_lock_irqsave(&priv->lock, flags); priv->last_msr = data[MCT_U232_MSR_INDEX]; /* Record Control Line states */ mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr); mct_u232_msr_to_icount(&port->icount, priv->last_msr); #if 0 /* Not yet handled. See belkin_sa.c for further information */ /* Now to report any errors */ priv->last_lsr = data[MCT_U232_LSR_INDEX]; /* * fill in the flip buffer here, but I do not know the relation * to the current/next receive buffer or characters. I need * to look in to this before committing any code. 
*/ if (priv->last_lsr & MCT_U232_LSR_ERR) { tty = tty_port_tty_get(&port->port); /* Overrun Error */ if (priv->last_lsr & MCT_U232_LSR_OE) { } /* Parity Error */ if (priv->last_lsr & MCT_U232_LSR_PE) { } /* Framing Error */ if (priv->last_lsr & MCT_U232_LSR_FE) { } /* Break Indicator */ if (priv->last_lsr & MCT_U232_LSR_BI) { } tty_kref_put(tty); } #endif wake_up_interruptible(&port->port.delta_msr_wait); spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&port->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } /* mct_u232_read_int_callback */ static void mct_u232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = &tty->termios; unsigned int cflag = termios->c_cflag; unsigned int old_cflag = old_termios->c_cflag; unsigned long flags; unsigned int control_state; unsigned char last_lcr; /* get a local copy of the current port settings */ spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); last_lcr = 0; /* * Update baud rate. * Do not attempt to cache old rates and skip settings, * disconnects screw such tricks up completely. * Premature optimization is the root of all evil. */ /* reassert DTR and RTS on transition from B0 */ if ((old_cflag & CBAUD) == B0) { dev_dbg(&port->dev, "%s: baud was B0\n", __func__); control_state |= TIOCM_DTR | TIOCM_RTS; mct_u232_set_modem_ctrl(port, control_state); } mct_u232_set_baud_rate(tty, serial, port, tty_get_baud_rate(tty)); if ((cflag & CBAUD) == B0) { dev_dbg(&port->dev, "%s: baud is B0\n", __func__); /* Drop RTS and DTR */ control_state &= ~(TIOCM_DTR | TIOCM_RTS); mct_u232_set_modem_ctrl(port, control_state); } /* * Update line control register (LCR) */ /* set the parity */ if (cflag & PARENB) last_lcr |= (cflag & PARODD) ? MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN; else last_lcr |= MCT_U232_PARITY_NONE; /* set the number of data bits */ switch (cflag & CSIZE) { case CS5: last_lcr |= MCT_U232_DATA_BITS_5; break; case CS6: last_lcr |= MCT_U232_DATA_BITS_6; break; case CS7: last_lcr |= MCT_U232_DATA_BITS_7; break; case CS8: last_lcr |= MCT_U232_DATA_BITS_8; break; default: dev_err(&port->dev, "CSIZE was not CS5-CS8, using default of 8\n"); last_lcr |= MCT_U232_DATA_BITS_8; break; } termios->c_cflag &= ~CMSPAR; /* set the number of stop bits */ last_lcr |= (cflag & CSTOPB) ? 
MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1; mct_u232_set_line_ctrl(port, last_lcr); /* save off the modified port settings */ spin_lock_irqsave(&priv->lock, flags); priv->control_state = control_state; priv->last_lcr = last_lcr; spin_unlock_irqrestore(&priv->lock, flags); } /* mct_u232_set_termios */ static int mct_u232_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char lcr; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); lcr = priv->last_lcr; if (break_state) lcr |= MCT_U232_SET_BREAK; spin_unlock_irqrestore(&priv->lock, flags); return mct_u232_set_line_ctrl(port, lcr); } /* mct_u232_break_ctl */ static int mct_u232_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); return control_state; } static int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; if (set & TIOCM_RTS) control_state |= TIOCM_RTS; if (set & TIOCM_DTR) control_state |= TIOCM_DTR; if (clear & TIOCM_RTS) control_state &= ~TIOCM_RTS; if (clear & TIOCM_DTR) control_state &= ~TIOCM_DTR; priv->control_state = control_state; spin_unlock_irqrestore(&priv->lock, flags); return mct_u232_set_modem_ctrl(port, control_state); } static void mct_u232_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; spin_lock_irq(&priv->lock); priv->rx_flags |= THROTTLED; if (C_CRTSCTS(tty)) { priv->control_state &= ~TIOCM_RTS; control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } else { spin_unlock_irq(&priv->lock); } } static void mct_u232_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; spin_lock_irq(&priv->lock); if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) { priv->rx_flags &= ~THROTTLED; priv->control_state |= TIOCM_RTS; control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } else { spin_unlock_irq(&priv->lock); } } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
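Each mct_u232_set_*() helper above follows the same shape: allocate a small DMA-safe buffer with kmalloc(), send one vendor-specific request with usb_control_msg(), then free the buffer. A condensed sketch of that pattern follows; the EXAMPLE_* constants and example_set_reg() are made up for this illustration and are not the driver's real request codes.

#include <linux/slab.h>
#include <linux/usb.h>

#define EXAMPLE_REQUEST		0x0a	/* hypothetical bRequest */
#define EXAMPLE_REQUEST_TYPE	(USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
#define EXAMPLE_TIMEOUT		5000	/* ms, mirrors WDR_TIMEOUT above */

static int example_set_reg(struct usb_device *udev, u8 value)
{
	unsigned char *buf;
	int rc;

	/* Control transfer buffers must be heap-allocated, never on the stack. */
	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf[0] = value;
	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			     EXAMPLE_REQUEST, EXAMPLE_REQUEST_TYPE,
			     0, 0, buf, 1, EXAMPLE_TIMEOUT);
	kfree(buf);

	return rc < 0 ? rc : 0;
}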
// SPDX-License-Identifier: GPL-2.0-or-later /* * DVB USB Linux driver for Anysee E30 DVB-C & DVB-T USB2.0 receiver * * Copyright (C) 2007 Antti Palosaari <crope@iki.fi> * * TODO: * - add smart card reader support for Conditional Access (CA) * * The card reader in the Anysee is nothing more than an ISO 7816 card reader. * There is no hardware CAM in any Anysee device sold. * In my understanding it should be implemented as its own module for an * ISO 7816 card reader, in the same way dvb_ca_en50221 is implemented. That * module would register a serial interface that can be used to communicate * with any ISO 7816 smart card. * * Any help with implementing serial smart card reader support * is highly welcome!
*/ #include "anysee.h" #include "dvb-pll.h" #include "tda1002x.h" #include "mt352.h" #include "mt352_priv.h" #include "zl10353.h" #include "tda18212.h" #include "cx24116.h" #include "stv0900.h" #include "stv6110.h" #include "isl6423.h" #include "cxd2820r.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen, u8 *rbuf, u8 rlen) { struct anysee_state *state = d_to_priv(d); int act_len, ret, i; mutex_lock(&d->usb_mutex); memcpy(&state->buf[0], sbuf, slen); state->buf[60] = state->seq++; dev_dbg(&d->udev->dev, "%s: >>> %*ph\n", __func__, slen, state->buf); /* * We need receive one message more after dvb_usbv2_generic_rw_locked() * due to weird transaction flow, which is 1 x send + 2 x receive. */ ret = dvb_usbv2_generic_rw_locked(d, state->buf, sizeof(state->buf), state->buf, sizeof(state->buf)); if (ret) goto error_unlock; /* get answer, retry few times if error returned */ for (i = 0; i < 3; i++) { /* receive 2nd answer */ ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, d->props->generic_bulk_ctrl_endpoint), state->buf, sizeof(state->buf), &act_len, 2000); if (ret) { dev_dbg(&d->udev->dev, "%s: recv bulk message failed=%d\n", __func__, ret); } else { dev_dbg(&d->udev->dev, "%s: <<< %*ph\n", __func__, rlen, state->buf); if (state->buf[63] != 0x4f) dev_dbg(&d->udev->dev, "%s: cmd failed\n", __func__); break; } } if (ret) { /* all retries failed, it is fatal */ dev_err(&d->udev->dev, "%s: recv bulk message failed=%d\n", KBUILD_MODNAME, ret); goto error_unlock; } /* read request, copy returned data to return buf */ if (rbuf && rlen) memcpy(rbuf, state->buf, rlen); error_unlock: mutex_unlock(&d->usb_mutex); return ret; } static int anysee_read_reg(struct dvb_usb_device *d, u16 reg, u8 *val) { u8 buf[] = {CMD_REG_READ, reg >> 8, reg & 0xff, 0x01}; int ret; ret = anysee_ctrl_msg(d, buf, sizeof(buf), val, 1); dev_dbg(&d->udev->dev, "%s: reg=%04x val=%02x\n", __func__, reg, *val); return ret; } static int anysee_write_reg(struct dvb_usb_device *d, u16 reg, u8 val) { u8 buf[] = {CMD_REG_WRITE, reg >> 8, reg & 0xff, 0x01, val}; dev_dbg(&d->udev->dev, "%s: reg=%04x val=%02x\n", __func__, reg, val); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } /* write single register with mask */ static int anysee_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val, u8 mask) { int ret; u8 tmp; /* no need for read if whole reg is written */ if (mask != 0xff) { ret = anysee_read_reg(d, reg, &tmp); if (ret) return ret; val &= mask; tmp &= ~mask; val |= tmp; } return anysee_write_reg(d, reg, val); } /* read single register with mask */ static int anysee_rd_reg_mask(struct dvb_usb_device *d, u16 reg, u8 *val, u8 mask) { int ret, i; u8 tmp; ret = anysee_read_reg(d, reg, &tmp); if (ret) return ret; tmp &= mask; /* find position of the first bit */ for (i = 0; i < 8; i++) { if ((mask >> i) & 0x01) break; } *val = tmp >> i; return 0; } static int anysee_get_hw_info(struct dvb_usb_device *d, u8 *id) { u8 buf[] = {CMD_GET_HW_INFO}; return anysee_ctrl_msg(d, buf, sizeof(buf), id, 3); } static int anysee_streaming_ctrl(struct dvb_frontend *fe, int onoff) { u8 buf[] = {CMD_STREAMING_CTRL, (u8)onoff, 0x00}; dev_dbg(&fe_to_d(fe)->udev->dev, "%s: onoff=%d\n", __func__, onoff); return anysee_ctrl_msg(fe_to_d(fe), buf, sizeof(buf), NULL, 0); } static int anysee_led_ctrl(struct dvb_usb_device *d, u8 mode, u8 interval) { u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x01, mode, interval}; dev_dbg(&d->udev->dev, "%s: state=%d interval=%d\n", __func__, mode, interval); return 
anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } static int anysee_ir_ctrl(struct dvb_usb_device *d, u8 onoff) { u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x02, onoff}; dev_dbg(&d->udev->dev, "%s: onoff=%d\n", __func__, onoff); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } /* I2C */ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0, inc, i = 0; u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].len < 1 || msg[i].len > 2 || msg[i + 1].len > 60) { ret = -EOPNOTSUPP; break; } buf[0] = CMD_I2C_READ; buf[1] = (msg[i].addr << 1) | 0x01; buf[2] = msg[i].buf[0]; buf[3] = (msg[i].len < 2) ? 0 : msg[i].buf[1]; buf[4] = msg[i].len-1; buf[5] = msg[i+1].len; ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf, msg[i+1].len); inc = 2; } else { if (msg[i].len > 48) { ret = -EOPNOTSUPP; break; } buf[0] = CMD_I2C_WRITE; buf[1] = (msg[i].addr << 1); buf[2] = msg[i].len; buf[3] = 0x01; memcpy(&buf[4], msg[i].buf, msg[i].len); ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0); inc = 1; } if (ret) break; i += inc; } mutex_unlock(&d->i2c_mutex); return ret ? ret : i; } static u32 anysee_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm anysee_i2c_algo = { .master_xfer = anysee_master_xfer, .functionality = anysee_i2c_func, }; static int anysee_mt352_demod_init(struct dvb_frontend *fe) { static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x28 }; static u8 reset[] = { RESET, 0x80 }; static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 }; static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 }; static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(200); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } /* Callbacks for DVB USB */ static struct tda10023_config anysee_tda10023_config = { .demod_address = (0x1a >> 1), .invert = 0, .xtal = 16000000, .pll_m = 11, .pll_p = 3, .pll_n = 1, .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_C, .deltaf = 0xfeeb, }; static struct mt352_config anysee_mt352_config = { .demod_address = (0x1e >> 1), .demod_init = anysee_mt352_demod_init, }; static struct zl10353_config anysee_zl10353_config = { .demod_address = (0x1e >> 1), .parallel_ts = 1, }; static struct zl10353_config anysee_zl10353_tda18212_config2 = { .demod_address = (0x1e >> 1), .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, .no_tuner = 1, .if2 = 41500, }; static struct zl10353_config anysee_zl10353_tda18212_config = { .demod_address = (0x18 >> 1), .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, .no_tuner = 1, .if2 = 41500, }; static struct tda10023_config anysee_tda10023_tda18212_config = { .demod_address = (0x1a >> 1), .xtal = 16000000, .pll_m = 12, .pll_p = 3, .pll_n = 1, .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_B, .deltaf = 0xba02, }; static const struct tda18212_config anysee_tda18212_config = { .if_dvbt_6 = 4150, .if_dvbt_7 = 4150, .if_dvbt_8 = 4150, .if_dvbc = 5000, }; static const struct tda18212_config anysee_tda18212_config2 = { .if_dvbt_6 = 3550, .if_dvbt_7 = 3700, .if_dvbt_8 = 4150, .if_dvbt2_6 = 3250, .if_dvbt2_7 = 4000, .if_dvbt2_8 
= 4000, .if_dvbc = 5000, }; static struct cx24116_config anysee_cx24116_config = { .demod_address = (0xaa >> 1), .mpg_clk_pos_pol = 0x00, .i2c_wr_max = 48, }; static struct stv0900_config anysee_stv0900_config = { .demod_address = (0xd0 >> 1), .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1, /* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config anysee_stv6110_config = { .i2c_address = (0xc0 >> 1), .mclk = 16000000, .clk_div = 1, }; static struct isl6423_config anysee_isl6423_config = { .current_max = SEC_CURRENT_800m, .curlim = SEC_CURRENT_LIM_OFF, .mod_extern = 1, .addr = (0x10 >> 1), }; static struct cxd2820r_config anysee_cxd2820r_config = { .i2c_address = 0x6d, /* (0xda >> 1) */ .ts_mode = 0x38, }; /* * New USB device strings: Mfr=1, Product=2, SerialNumber=0 * Manufacturer: AMT.CO.KR * * E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=???????? * PCB: ? * parts: DNOS404ZH102A(MT352, DTT7579(?)) * * E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)" * PCB: PCB 507T (rev1.61) * parts: DNOS404ZH103A(ZL10353, DTT7579(?)) * OEA=0a OEB=00 OEC=00 OED=ff OEE=00 * IOA=45 IOB=ff IOC=00 IOD=ff IOE=00 * * E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee" * PCB: 507CD (rev1.1) * parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe * IOA=4f IOB=ff IOC=00 IOD=06 IOE=01 * IOD[0] ZL10353 1=enabled * IOA[7] TS 0=enabled * tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not) * * E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)" * PCB: 507DC (rev0.2) * parts: TDA10023, DTOS403IH102B TM, CST56I01 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe * IOA=4f IOB=ff IOC=00 IOD=26 IOE=01 * IOD[0] TDA10023 1=enabled * * E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)" * PCB: 507SI (rev2.1) * parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=fe * IOA=4d IOB=ff IOC=00 IOD=26 IOE=01 * IOD[0] CX24116 1=enabled * * E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" * PCB: 507FA (rev0.4) * parts: TDA10023, DTOS403IH102B TM, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0 * IOD[5] TDA10023 1=enabled * IOE[0] tuner 1=enabled * * E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" * PCB: 507FA (rev1.1) * parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0 * DVB-C: * IOD[5] TDA10023 1=enabled * IOE[0] tuner 1=enabled * DVB-T: * IOD[0] ZL10353 1=enabled * IOE[0] tuner 0=enabled * tuner is behind ZL10353 I2C-gate * tuner is behind TDA10023 I2C-gate * * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)" * PCB: 508TC (rev0.6) * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[4] TDA18212 1=enabled * DVB-C: * IOD[6] ZL10353 0=disabled * IOD[5] TDA10023 1=enabled * IOE[0] IF 1=enabled * DVB-T: * IOD[5] TDA10023 0=disabled * IOD[6] ZL10353 1=enabled * IOE[0] IF 0=enabled * * E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)" * PCB: 508S2 (rev0.7) * parts: DNBU10512IST(STV0903, STV6110), ISL6423 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] STV0903 1=enabled * * E7 T2C VID=1c73 PID=861f HW=20 FW=0.1 AMTCI=0.5 "anysee-E7T2C(LP)" * PCB: 508T2C (rev0.3) * parts: DNOQ44QCH106A(CXD2820R, TDA18212), TDA8024 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * 
IOA[7] TS 1=enabled * IOE[5] CXD2820R 1=enabled * * E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)" * PCB: 508PTC (rev0.5) * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[4] TDA18212 1=enabled * DVB-C: * IOD[6] ZL10353 0=disabled * IOD[5] TDA10023 1=enabled * IOE[0] IF 1=enabled * DVB-T: * IOD[5] TDA10023 0=disabled * IOD[6] ZL10353 1=enabled * IOE[0] IF 0=enabled * * E7 PS2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)" * PCB: 508PS2 (rev0.4) * parts: DNBU10512IST(STV0903, STV6110), ISL6423 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] STV0903 1=enabled */ static int anysee_read_config(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); int ret; u8 hw_info[3]; /* * Check which hardware we have. * We must do this call two times to get reliable values (hw/fw bug). */ ret = anysee_get_hw_info(d, hw_info); if (ret) goto error; ret = anysee_get_hw_info(d, hw_info); if (ret) goto error; /* * Meaning of these info bytes are guessed. */ dev_info(&d->udev->dev, "%s: firmware version %d.%d hardware id %d\n", KBUILD_MODNAME, hw_info[1], hw_info[2], hw_info[0]); state->hw = hw_info[0]; error: return ret; } /* external I2C gate used for DNOD44CDH086A(TDA18212) tuner module */ static int anysee_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { /* enable / disable tuner access on IOE[4] */ return anysee_wr_reg_mask(fe_to_d(fe), REG_IOE, (enable << 4), 0x10); } static int anysee_frontend_ctrl(struct dvb_frontend *fe, int onoff) { struct anysee_state *state = fe_to_priv(fe); struct dvb_usb_device *d = fe_to_d(fe); int ret; dev_dbg(&d->udev->dev, "%s: fe=%d onoff=%d\n", __func__, fe->id, onoff); /* no frontend sleep control */ if (onoff == 0) return 0; switch (state->hw) { case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ if (fe->id == 0) { /* disable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 0), 0x01); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* enable DVB-C tuner on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 0), 0x01); if (ret) goto error; } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* enable DVB-T tuner on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (0 << 0), 0x01); if (ret) goto error; } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ if (fe->id == 0) { /* disable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 6), 0x40); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* enable IF route on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 0), 0x01); if (ret) goto error; } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 6), 0x40); if (ret) goto error; /* enable IF route on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (0 << 0), 0x01); if (ret) goto error; } break; default: ret = 0; } error: return ret; } static int anysee_add_i2c_dev(struct dvb_usb_device *d, const 
char *type, u8 addr, void *platform_data) { int ret, num; struct anysee_state *state = d_to_priv(d); struct i2c_client *client; struct i2c_adapter *adapter = &d->i2c_adap; struct i2c_board_info board_info = { .addr = addr, .platform_data = platform_data, }; strscpy(board_info.type, type, I2C_NAME_SIZE); /* find first free client */ for (num = 0; num < ANYSEE_I2C_CLIENT_MAX; num++) { if (state->i2c_client[num] == NULL) break; } dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num); if (num == ANYSEE_I2C_CLIENT_MAX) { dev_err(&d->udev->dev, "%s: I2C client out of index\n", KBUILD_MODNAME); ret = -ENODEV; goto err; } request_module("%s", board_info.type); /* register I2C device */ client = i2c_new_client_device(adapter, &board_info); if (!i2c_client_has_driver(client)) { ret = -ENODEV; goto err; } /* increase I2C driver usage count */ if (!try_module_get(client->dev.driver->owner)) { i2c_unregister_device(client); ret = -ENODEV; goto err; } state->i2c_client[num] = client; return 0; err: dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret); return ret; } static void anysee_del_i2c_dev(struct dvb_usb_device *d) { int num; struct anysee_state *state = d_to_priv(d); struct i2c_client *client; /* find last used client */ num = ANYSEE_I2C_CLIENT_MAX; while (num--) { if (state->i2c_client[num] != NULL) break; } dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num); if (num == -1) { dev_err(&d->udev->dev, "%s: I2C client out of index\n", KBUILD_MODNAME); goto err; } client = state->i2c_client[num]; /* decrease I2C driver usage count */ module_put(client->dev.driver->owner); /* unregister I2C device */ i2c_unregister_device(client); state->i2c_client[num] = NULL; err: dev_dbg(&d->udev->dev, "%s: failed\n", __func__); } static int anysee_frontend_attach(struct dvb_usb_adapter *adap) { struct anysee_state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); int ret = 0; u8 tmp; struct i2c_msg msg[2] = { { .addr = 0x60, .flags = 0, .len = 1, .buf = "\x00", }, { .addr = 0x60, .flags = I2C_M_RD, .len = 1, .buf = &tmp, } }; switch (state->hw) { case ANYSEE_HW_507T: /* 2 */ /* E30 */ /* attach demod */ adap->fe[0] = dvb_attach(mt352_attach, &anysee_mt352_config, &d->i2c_adap); if (adap->fe[0]) break; /* attach demod */ adap->fe[0] = dvb_attach(zl10353_attach, &anysee_zl10353_config, &d->i2c_adap); break; case ANYSEE_HW_507CD: /* 6 */ /* E30 Plus */ /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* enable transport stream on IOA[7] */ ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(zl10353_attach, &anysee_zl10353_config, &d->i2c_adap); break; case ANYSEE_HW_507DC: /* 10 */ /* E30 C Plus */ /* enable DVB-C demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_config, &d->i2c_adap, 0x48); break; case ANYSEE_HW_507SI: /* 11 */ /* E30 S2 Plus */ /* enable DVB-S/S2 demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(cx24116_attach, &anysee_cx24116_config, &d->i2c_adap); break; case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ /* enable tuner on IOE[4] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 4), 0x10); if (ret) goto error; /* probe TDA18212 */ tmp = 0; ret = i2c_transfer(&d->i2c_adap, msg, 2); if (ret == 2 && tmp == 0xc7) { 
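/* 0xc7 is the TDA18212 chip ID: reading it back here means a TDA18212 silicon tuner module is fitted, so the TDA18212-specific demod and tuner configs are used below. */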
dev_dbg(&d->udev->dev, "%s: TDA18212 found\n", __func__); state->has_tda18212 = true; } else tmp = 0; /* disable tuner on IOE[4] */ ret = anysee_wr_reg_mask(d, REG_IOE, (0 << 4), 0x10); if (ret) goto error; /* disable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 0), 0x01); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ if (tmp == 0xc7) { /* TDA18212 config */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_tda18212_config, &d->i2c_adap, 0x48); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[0]) adap->fe[0]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; } else { /* PLL config */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_config, &d->i2c_adap, 0x48); } /* break out if first frontend attaching fails */ if (!adap->fe[0]) break; /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ if (tmp == 0xc7) { /* TDA18212 config */ adap->fe[1] = dvb_attach(zl10353_attach, &anysee_zl10353_tda18212_config2, &d->i2c_adap); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[1]) adap->fe[1]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; } else { /* PLL config */ adap->fe[1] = dvb_attach(zl10353_attach, &anysee_zl10353_config, &d->i2c_adap); } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ /* disable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 6), 0x40); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_tda18212_config, &d->i2c_adap, 0x48); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[0]) adap->fe[0]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; /* break out if first frontend attaching fails */ if (!adap->fe[0]) break; /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 6), 0x40); if (ret) goto error; /* attach demod */ adap->fe[1] = dvb_attach(zl10353_attach, &anysee_zl10353_tda18212_config, &d->i2c_adap); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[1]) adap->fe[1]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; state->has_ci = true; break; case ANYSEE_HW_508S2: /* 19 */ case ANYSEE_HW_508PS2: /* 22 */ /* E7 S2 */ /* E7 PS2 */ /* enable DVB-S/S2 demod on IOE[5] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(stv0900_attach, &anysee_stv0900_config, &d->i2c_adap, 0); state->has_ci = true; break; case ANYSEE_HW_508T2C: /* 20 */ /* E7 T2C */ /* enable DVB-T/T2/C demod on IOE[5] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(cxd2820r_attach, &anysee_cxd2820r_config, &d->i2c_adap, NULL); state->has_ci = true; break; } if (!adap->fe[0]) { /* we have no frontend :-( */ ret = -ENODEV; dev_err(&d->udev->dev, "%s: Unsupported Anysee version. 
Please report to <linux-media@vger.kernel.org>.\n", KBUILD_MODNAME); } error: return ret; } static int anysee_tuner_attach(struct dvb_usb_adapter *adap) { struct anysee_state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct dvb_frontend *fe; int ret; dev_dbg(&d->udev->dev, "%s:\n", __func__); switch (state->hw) { case ANYSEE_HW_507T: /* 2 */ /* E30 */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc2 >> 1), NULL, DVB_PLL_THOMSON_DTT7579); break; case ANYSEE_HW_507CD: /* 6 */ /* E30 Plus */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc2 >> 1), &d->i2c_adap, DVB_PLL_THOMSON_DTT7579); break; case ANYSEE_HW_507DC: /* 10 */ /* E30 C Plus */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); break; case ANYSEE_HW_507SI: /* 11 */ /* E30 S2 Plus */ /* attach LNB controller */ fe = dvb_attach(isl6423_attach, adap->fe[0], &d->i2c_adap, &anysee_isl6423_config); break; case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ /* Try first attach TDA18212 silicon tuner on IOE[4], if that * fails attach old simple PLL. */ /* attach tuner */ if (state->has_tda18212) { struct tda18212_config tda18212_config = anysee_tda18212_config; tda18212_config.fe = adap->fe[0]; ret = anysee_add_i2c_dev(d, "tda18212", 0x60, &tda18212_config); if (ret) goto err; /* copy tuner ops for 2nd FE as tuner is shared */ if (adap->fe[1]) { adap->fe[1]->tuner_priv = adap->fe[0]->tuner_priv; memcpy(&adap->fe[1]->ops.tuner_ops, &adap->fe[0]->ops.tuner_ops, sizeof(struct dvb_tuner_ops)); } return 0; } else { /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); if (fe && adap->fe[1]) { /* attach tuner for 2nd FE */ fe = dvb_attach(dvb_pll_attach, adap->fe[1], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); } } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ { /* E7 TC */ /* E7 PTC */ struct tda18212_config tda18212_config = anysee_tda18212_config; tda18212_config.fe = adap->fe[0]; ret = anysee_add_i2c_dev(d, "tda18212", 0x60, &tda18212_config); if (ret) goto err; /* copy tuner ops for 2nd FE as tuner is shared */ if (adap->fe[1]) { adap->fe[1]->tuner_priv = adap->fe[0]->tuner_priv; memcpy(&adap->fe[1]->ops.tuner_ops, &adap->fe[0]->ops.tuner_ops, sizeof(struct dvb_tuner_ops)); } return 0; } case ANYSEE_HW_508S2: /* 19 */ case ANYSEE_HW_508PS2: /* 22 */ /* E7 S2 */ /* E7 PS2 */ /* attach tuner */ fe = dvb_attach(stv6110_attach, adap->fe[0], &anysee_stv6110_config, &d->i2c_adap); if (fe) { /* attach LNB controller */ fe = dvb_attach(isl6423_attach, adap->fe[0], &d->i2c_adap, &anysee_isl6423_config); } break; case ANYSEE_HW_508T2C: /* 20 */ { /* E7 T2C */ struct tda18212_config tda18212_config = anysee_tda18212_config2; tda18212_config.fe = adap->fe[0]; ret = anysee_add_i2c_dev(d, "tda18212", 0x60, &tda18212_config); if (ret) goto err; return 0; } default: fe = NULL; } if (fe) ret = 0; else ret = -ENODEV; err: return ret; } #if IS_ENABLED(CONFIG_RC_CORE) static int anysee_rc_query(struct dvb_usb_device *d) { u8 buf[] = {CMD_GET_IR_CODE}; u8 ircode[2]; int ret; /* Remote controller is basic NEC using address byte 0x08. Anysee device RC query returns only two bytes, status and code, address byte is dropped. Also it does not return any value for NEC RCs having address byte other than 0x08. Due to that, we cannot use that device as standard NEC receiver. 
It could be possible make hack which reads whole code directly from device memory... */ ret = anysee_ctrl_msg(d, buf, sizeof(buf), ircode, sizeof(ircode)); if (ret) return ret; if (ircode[0]) { dev_dbg(&d->udev->dev, "%s: key pressed %02x\n", __func__, ircode[1]); rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(0x08, ircode[1]), 0); } return 0; } static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { rc->allowed_protos = RC_PROTO_BIT_NEC; rc->query = anysee_rc_query; rc->interval = 250; /* windows driver uses 500ms */ return 0; } #else #define anysee_get_rc_config NULL #endif static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot, int addr) { struct dvb_usb_device *d = ci->data; int ret; u8 buf[] = {CMD_CI, 0x02, 0x40 | addr >> 8, addr & 0xff, 0x00, 1}; u8 val; ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1); if (ret) return ret; return val; } static int anysee_ci_write_attribute_mem(struct dvb_ca_en50221 *ci, int slot, int addr, u8 val) { struct dvb_usb_device *d = ci->data; u8 buf[] = {CMD_CI, 0x03, 0x40 | addr >> 8, addr & 0xff, 0x00, 1, val}; return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } static int anysee_ci_read_cam_control(struct dvb_ca_en50221 *ci, int slot, u8 addr) { struct dvb_usb_device *d = ci->data; int ret; u8 buf[] = {CMD_CI, 0x04, 0x40, addr, 0x00, 1}; u8 val; ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1); if (ret) return ret; return val; } static int anysee_ci_write_cam_control(struct dvb_ca_en50221 *ci, int slot, u8 addr, u8 val) { struct dvb_usb_device *d = ci->data; u8 buf[] = {CMD_CI, 0x05, 0x40, addr, 0x00, 1, val}; return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } static int anysee_ci_slot_reset(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; int ret; struct anysee_state *state = d_to_priv(d); state->ci_cam_ready = jiffies + msecs_to_jiffies(1000); ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80); if (ret) return ret; msleep(300); ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; return 0; } static int anysee_ci_slot_shutdown(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; int ret; ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80); if (ret) return ret; msleep(30); ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; return 0; } static int anysee_ci_slot_ts_enable(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; return anysee_wr_reg_mask(d, REG_IOD, (0 << 1), 0x02); } static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot, int open) { struct dvb_usb_device *d = ci->data; struct anysee_state *state = d_to_priv(d); int ret; u8 tmp = 0; ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40); if (ret) return ret; if (tmp == 0) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT; if (time_after(jiffies, state->ci_cam_ready)) ret |= DVB_CA_EN50221_POLL_CAM_READY; } return ret; } static int anysee_ci_init(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); int ret; state->ci.owner = THIS_MODULE; state->ci.read_attribute_mem = anysee_ci_read_attribute_mem; state->ci.write_attribute_mem = anysee_ci_write_attribute_mem; state->ci.read_cam_control = anysee_ci_read_cam_control; state->ci.write_cam_control = anysee_ci_write_cam_control; state->ci.slot_reset = anysee_ci_slot_reset; state->ci.slot_shutdown = anysee_ci_slot_shutdown; state->ci.slot_ts_enable = anysee_ci_slot_ts_enable; state->ci.poll_slot_status = anysee_ci_poll_slot_status; 
state->ci.data = d; ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 2)|(0 << 1)|(0 << 0), 0x07); if (ret) return ret; ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 2)|(1 << 1)|(1 << 0), 0x07); if (ret) return ret; ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1); if (ret) return ret; state->ci_attached = true; return 0; } static void anysee_ci_release(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); /* detach CI */ if (state->ci_attached) dvb_ca_en50221_release(&state->ci); return; } static int anysee_init(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); int ret; /* There is one interface with two alternate settings. Alternate setting 0 is for bulk transfer. Alternate setting 1 is for isochronous transfer. We use bulk transfer (alternate setting 0). */ ret = usb_set_interface(d->udev, 0, 0); if (ret) return ret; /* LED light */ ret = anysee_led_ctrl(d, 0x01, 0x03); if (ret) return ret; /* enable IR */ ret = anysee_ir_ctrl(d, 1); if (ret) return ret; /* attach CI */ if (state->has_ci) { ret = anysee_ci_init(d); if (ret) return ret; } return 0; } static void anysee_exit(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); if (state->i2c_client[0]) anysee_del_i2c_dev(d); return anysee_ci_release(d); } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties anysee_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct anysee_state), .generic_bulk_ctrl_endpoint = 0x01, .generic_bulk_ctrl_endpoint_response = 0x81, .i2c_algo = &anysee_i2c_algo, .read_config = anysee_read_config, .frontend_attach = anysee_frontend_attach, .tuner_attach = anysee_tuner_attach, .init = anysee_init, .get_rc_config = anysee_get_rc_config, .frontend_ctrl = anysee_frontend_ctrl, .streaming_ctrl = anysee_streaming_ctrl, .exit = anysee_exit, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x82, 8, 16 * 512), } } }; static const struct usb_device_id anysee_id_table[] = { { DVB_USB_DEVICE(USB_VID_CYPRESS, USB_PID_ANYSEE, &anysee_props, "Anysee", RC_MAP_ANYSEE) }, { DVB_USB_DEVICE(USB_VID_AMT, USB_PID_ANYSEE, &anysee_props, "Anysee", RC_MAP_ANYSEE) }, { } }; MODULE_DEVICE_TABLE(usb, anysee_id_table); static struct usb_driver anysee_usb_driver = { .name = KBUILD_MODNAME, .id_table = anysee_id_table, .probe = dvb_usbv2_probe, .disconnect = dvb_usbv2_disconnect, .suspend = dvb_usbv2_suspend, .resume = dvb_usbv2_resume, .reset_resume = dvb_usbv2_reset_resume, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(anysee_usb_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0"); MODULE_LICENSE("GPL");
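The frontend_attach() cases above route demod and tuner selection through anysee_wr_reg_mask() calls such as anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01), i.e. masked writes to the bridge GPIO registers. The helper itself is defined earlier in the file and is not part of this listing; the sketch below only illustrates the read-modify-write pattern those calls rely on. The anysee_rd_reg()/anysee_wr_reg() accessors named here are assumptions standing in for the real single-register helpers.

/*
 * Sketch only: assumed semantics of a masked register write, as used by
 * the IOA/IOD/IOE toggles in anysee_frontend_attach(). Not the driver's
 * actual implementation.
 */
static int anysee_wr_reg_mask_sketch(struct dvb_usb_device *d, u16 reg,
				     u8 val, u8 mask)
{
	int ret;
	u8 tmp;

	ret = anysee_rd_reg(d, reg, &tmp);	/* read current value */
	if (ret)
		return ret;

	tmp &= ~mask;				/* clear the masked bits */
	tmp |= (val & mask);			/* merge in the new bits */

	return anysee_wr_reg(d, reg, tmp);	/* write the result back */
}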
/* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit
persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt <eric@anholt.net> * */ #include <linux/dma-buf.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/iosys-map.h> #include <linux/mem_encrypt.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <linux/types.h> #include <linux/uaccess.h> #include <drm/drm.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/drm_vma_manager.h> #include "drm_internal.h" /** @file drm_gem.c * * This file provides some of the base ioctls and library routines for * the graphics memory manager implemented by each device driver. * * Because various devices have different requirements in terms of * synchronization and migration strategies, implementing that is left up to * the driver, and all that the general API provides should be generic -- * allocating objects, reading/writing data with the cpu, freeing objects. * Even there, platform-dependent optimizations for reading/writing data with * the CPU mean we'll likely hook those out to driver-specific calls. However, * the DRI2 implementation wants to have at least allocate/mmap be generic. * * The goal was to have swap-backed object allocation managed through * struct file. However, file descriptors as handles to a struct file have * two major failings: * - Process limits prevent more than 1024 or so being used at a time by * default. * - Inability to allocate high fds will aggravate the X Server's select() * handling, and likely that of many GL client applications as well. * * This led to a plan of using our own integer IDs (called handles, following * DRM terminology) to mimic fds, and implement the fd syscalls we need as * ioctls. The objects themselves will still include the struct file so * that we can transition to fds if the required kernel infrastructure shows * up at a later date, and as our interface with shmfs for memory allocation. 
*/ static void drm_gem_init_release(struct drm_device *dev, void *ptr) { drm_vma_offset_manager_destroy(dev->vma_offset_manager); } /** * drm_gem_init - Initialize the GEM device fields * @dev: drm_devic structure to initialize */ int drm_gem_init(struct drm_device *dev) { struct drm_vma_offset_manager *vma_offset_manager; mutex_init(&dev->object_name_lock); idr_init_base(&dev->object_name_idr, 1); vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), GFP_KERNEL); if (!vma_offset_manager) { DRM_ERROR("out of memory\n"); return -ENOMEM; } dev->vma_offset_manager = vma_offset_manager; drm_vma_offset_manager_init(vma_offset_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); return drmm_add_action(dev, drm_gem_init_release, NULL); } /** * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM * object in a given shmfs mountpoint * * @dev: drm_device the object should be initialized for * @obj: drm_gem_object to initialize * @size: object size * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use * the usual tmpfs mountpoint (`shm_mnt`). * * Initialize an already allocated GEM object of the specified size with * shmfs backing store. */ int drm_gem_object_init_with_mnt(struct drm_device *dev, struct drm_gem_object *obj, size_t size, struct vfsmount *gemfs) { struct file *filp; drm_gem_private_object_init(dev, obj, size); if (gemfs) filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size, VM_NORESERVE); else filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); if (IS_ERR(filp)) return PTR_ERR(filp); obj->filp = filp; return 0; } EXPORT_SYMBOL(drm_gem_object_init_with_mnt); /** * drm_gem_object_init - initialize an allocated shmem-backed GEM object * @dev: drm_device the object should be initialized for * @obj: drm_gem_object to initialize * @size: object size * * Initialize an already allocated GEM object of the specified size with * shmfs backing store. */ int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size) { return drm_gem_object_init_with_mnt(dev, obj, size, NULL); } EXPORT_SYMBOL(drm_gem_object_init); /** * drm_gem_private_object_init - initialize an allocated private GEM object * @dev: drm_device the object should be initialized for * @obj: drm_gem_object to initialize * @size: object size * * Initialize an already allocated GEM object of the specified size with * no GEM provided backing store. Instead the caller is responsible for * backing the object and handling it. */ void drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size) { BUG_ON((size & (PAGE_SIZE - 1)) != 0); obj->dev = dev; obj->filp = NULL; kref_init(&obj->refcount); obj->handle_count = 0; obj->size = size; dma_resv_init(&obj->_resv); if (!obj->resv) obj->resv = &obj->_resv; if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA)) drm_gem_gpuva_init(obj); drm_vma_node_reset(&obj->vma_node); INIT_LIST_HEAD(&obj->lru_node); } EXPORT_SYMBOL(drm_gem_private_object_init); /** * drm_gem_private_object_fini - Finalize a failed drm_gem_object * @obj: drm_gem_object * * Uninitialize an already allocated GEM object when it initialized failed */ void drm_gem_private_object_fini(struct drm_gem_object *obj) { WARN_ON(obj->dma_buf); dma_resv_fini(&obj->_resv); } EXPORT_SYMBOL(drm_gem_private_object_fini); /** * drm_gem_object_handle_free - release resources bound to userspace handles * @obj: GEM object to clean up. 
* * Called after the last handle to the object has been closed * * Removes any name for the object. Note that this must be * called before drm_gem_object_free or we'll be touching * freed memory */ static void drm_gem_object_handle_free(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; /* Remove any name for this object */ if (obj->name) { idr_remove(&dev->object_name_idr, obj->name); obj->name = 0; } } static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) { /* Unbreak the reference cycle if we have an exported dma_buf. */ if (obj->dma_buf) { dma_buf_put(obj->dma_buf); obj->dma_buf = NULL; } } static void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; bool final = false; if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) return; /* * Must bump handle count first as this may be the last * ref, in which case the object would disappear before we * checked for a name */ mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { drm_gem_object_handle_free(obj); drm_gem_object_exported_dma_buf_free(obj); final = true; } mutex_unlock(&dev->object_name_lock); if (final) drm_gem_object_put(obj); } /* * Called at device or object close to release the file's * handle references on objects. */ static int drm_gem_object_release_handle(int id, void *ptr, void *data) { struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; if (obj->funcs->close) obj->funcs->close(obj, file_priv); drm_prime_remove_buf_handle(&file_priv->prime, id); drm_vma_node_revoke(&obj->vma_node, file_priv); drm_gem_object_handle_put_unlocked(obj); return 0; } /** * drm_gem_handle_delete - deletes the given file-private handle * @filp: drm file-private structure to use for the handle look up * @handle: userspace handle to delete * * Removes the GEM handle from the @filp lookup table which has been added with * drm_gem_handle_create(). If this is the last handle also cleans up linked * resources like GEM names. */ int drm_gem_handle_delete(struct drm_file *filp, u32 handle) { struct drm_gem_object *obj; spin_lock(&filp->table_lock); /* Check if we currently have a reference on the object */ obj = idr_replace(&filp->object_idr, NULL, handle); spin_unlock(&filp->table_lock); if (IS_ERR_OR_NULL(obj)) return -EINVAL; /* Release driver's reference and decrement refcount. */ drm_gem_object_release_handle(handle, obj, filp); /* And finally make the handle available for future allocations. */ spin_lock(&filp->table_lock); idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); return 0; } EXPORT_SYMBOL(drm_gem_handle_delete); /** * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object * @file: drm file-private structure containing the gem object * @dev: corresponding drm_device * @handle: gem object handle * @offset: return location for the fake mmap offset * * This implements the &drm_driver.dumb_map_offset kms driver callback for * drivers which use gem to manage their backing storage. * * Returns: * 0 on success or a negative error code on failure. 
*/ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, u32 handle, u64 *offset) { struct drm_gem_object *obj; int ret; obj = drm_gem_object_lookup(file, handle); if (!obj) return -ENOENT; /* Don't allow imported objects to be mapped */ if (obj->import_attach) { ret = -EINVAL; goto out; } ret = drm_gem_create_mmap_offset(obj); if (ret) goto out; *offset = drm_vma_node_offset_addr(&obj->vma_node); out: drm_gem_object_put(obj); return ret; } EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); /** * drm_gem_handle_create_tail - internal functions to create a handle * @file_priv: drm file-private structure to register the handle for * @obj: object to register * @handlep: pointer to return the created handle to the caller * * This expects the &drm_device.object_name_lock to be held already and will * drop it before returning. Used to avoid races in establishing new handles * when importing an object from either an flink name or a dma-buf. * * Handles must be release again through drm_gem_handle_delete(). This is done * when userspace closes @file_priv for all attached handles, or through the * GEM_CLOSE ioctl for individual handles. */ int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep) { struct drm_device *dev = obj->dev; u32 handle; int ret; WARN_ON(!mutex_is_locked(&dev->object_name_lock)); if (obj->handle_count++ == 0) drm_gem_object_get(obj); /* * Get the user-visible handle using idr. Preload and perform * allocation under our spinlock. */ idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); spin_unlock(&file_priv->table_lock); idr_preload_end(); mutex_unlock(&dev->object_name_lock); if (ret < 0) goto err_unref; handle = ret; ret = drm_vma_node_allow(&obj->vma_node, file_priv); if (ret) goto err_remove; if (obj->funcs->open) { ret = obj->funcs->open(obj, file_priv); if (ret) goto err_revoke; } *handlep = handle; return 0; err_revoke: drm_vma_node_revoke(&obj->vma_node, file_priv); err_remove: spin_lock(&file_priv->table_lock); idr_remove(&file_priv->object_idr, handle); spin_unlock(&file_priv->table_lock); err_unref: drm_gem_object_handle_put_unlocked(obj); return ret; } /** * drm_gem_handle_create - create a gem handle for an object * @file_priv: drm file-private structure to register the handle for * @obj: object to register * @handlep: pointer to return the created handle to the caller * * Create a handle for this object. This adds a handle reference to the object, * which includes a regular reference count. Callers will likely want to * dereference the object afterwards. * * Since this publishes @obj to userspace it must be fully set up by this point, * drivers must call this last in their buffer object creation callbacks. */ int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep) { mutex_lock(&obj->dev->object_name_lock); return drm_gem_handle_create_tail(file_priv, obj, handlep); } EXPORT_SYMBOL(drm_gem_handle_create); /** * drm_gem_free_mmap_offset - release a fake mmap offset for an object * @obj: obj in question * * This routine frees fake offsets allocated by drm_gem_create_mmap_offset(). * * Note that drm_gem_object_release() already calls this function, so drivers * don't have to take care of releasing the mmap offset themselves when freeing * the GEM object. 
*/ void drm_gem_free_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node); } EXPORT_SYMBOL(drm_gem_free_mmap_offset); /** * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object * @obj: obj in question * @size: the virtual size * * GEM memory mapping works by handing back to userspace a fake mmap offset * it can use in a subsequent mmap(2) call. The DRM core code then looks * up the object based on the offset and sets up the various memory mapping * structures. * * This routine allocates and attaches a fake offset for @obj, in cases where * the virtual size differs from the physical size (ie. &drm_gem_object.size). * Otherwise just use drm_gem_create_mmap_offset(). * * This function is idempotent and handles an already allocated mmap offset * transparently. Drivers do not need to check for this case. */ int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) { struct drm_device *dev = obj->dev; return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, size / PAGE_SIZE); } EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); /** * drm_gem_create_mmap_offset - create a fake mmap offset for an object * @obj: obj in question * * GEM memory mapping works by handing back to userspace a fake mmap offset * it can use in a subsequent mmap(2) call. The DRM core code then looks * up the object based on the offset and sets up the various memory mapping * structures. * * This routine allocates and attaches a fake offset for @obj. * * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release * the fake offset again. */ int drm_gem_create_mmap_offset(struct drm_gem_object *obj) { return drm_gem_create_mmap_offset_size(obj, obj->size); } EXPORT_SYMBOL(drm_gem_create_mmap_offset); /* * Move folios to appropriate lru and release the folios, decrementing the * ref count of those folios. */ static void drm_gem_check_release_batch(struct folio_batch *fbatch) { check_move_unevictable_folios(fbatch); __folio_batch_release(fbatch); cond_resched(); } /** * drm_gem_get_pages - helper to allocate backing pages for a GEM object * from shmem * @obj: obj in question * * This reads the page-array of the shmem-backing storage of the given gem * object. An array of pages is returned. If a page is not allocated or * swapped-out, this will allocate/swap-in the required pages. Note that the * whole object is covered by the page-array and pinned in memory. * * Use drm_gem_put_pages() to release the array and unpin all pages. * * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). * If you require other GFP-masks, you have to do those allocations yourself. * * Note that you are not allowed to change gfp-zones during runtime. That is, * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as * set during initialization. If you have special zone constraints, set them * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care * to keep pages in the required zone during swap-in. * * This function is only valid on objects initialized with * drm_gem_object_init(), but not for those initialized with * drm_gem_private_object_init() only. 
*/ struct page **drm_gem_get_pages(struct drm_gem_object *obj) { struct address_space *mapping; struct page **pages; struct folio *folio; struct folio_batch fbatch; long i, j, npages; if (WARN_ON(!obj->filp)) return ERR_PTR(-EINVAL); /* This is the shared memory object that backs the GEM resource */ mapping = obj->filp->f_mapping; /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless * driver author is doing something really wrong: */ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); npages = obj->size >> PAGE_SHIFT; pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (pages == NULL) return ERR_PTR(-ENOMEM); mapping_set_unevictable(mapping); i = 0; while (i < npages) { long nr; folio = shmem_read_folio_gfp(mapping, i, mapping_gfp_mask(mapping)); if (IS_ERR(folio)) goto fail; nr = min(npages - i, folio_nr_pages(folio)); for (j = 0; j < nr; j++, i++) pages[i] = folio_file_page(folio, i); /* Make sure shmem keeps __GFP_DMA32 allocated pages in the * correct region during swapin. Note that this requires * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) * so shmem can relocate pages during swapin if required. */ BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) && (folio_pfn(folio) >= 0x00100000UL)); } return pages; fail: mapping_clear_unevictable(mapping); folio_batch_init(&fbatch); j = 0; while (j < i) { struct folio *f = page_folio(pages[j]); if (!folio_batch_add(&fbatch, f)) drm_gem_check_release_batch(&fbatch); j += folio_nr_pages(f); } if (fbatch.nr) drm_gem_check_release_batch(&fbatch); kvfree(pages); return ERR_CAST(folio); } EXPORT_SYMBOL(drm_gem_get_pages); /** * drm_gem_put_pages - helper to free backing pages for a GEM object * @obj: obj in question * @pages: pages to free * @dirty: if true, pages will be marked as dirty * @accessed: if true, the pages will be marked as accessed */ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed) { int i, npages; struct address_space *mapping; struct folio_batch fbatch; mapping = file_inode(obj->filp)->i_mapping; mapping_clear_unevictable(mapping); /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless * driver author is doing something really wrong: */ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); npages = obj->size >> PAGE_SHIFT; folio_batch_init(&fbatch); for (i = 0; i < npages; i++) { struct folio *folio; if (!pages[i]) continue; folio = page_folio(pages[i]); if (dirty) folio_mark_dirty(folio); if (accessed) folio_mark_accessed(folio); /* Undo the reference we took when populating the table */ if (!folio_batch_add(&fbatch, folio)) drm_gem_check_release_batch(&fbatch); i += folio_nr_pages(folio) - 1; } if (folio_batch_count(&fbatch)) drm_gem_check_release_batch(&fbatch); kvfree(pages); } EXPORT_SYMBOL(drm_gem_put_pages); static int objects_lookup(struct drm_file *filp, u32 *handle, int count, struct drm_gem_object **objs) { int i, ret = 0; struct drm_gem_object *obj; spin_lock(&filp->table_lock); for (i = 0; i < count; i++) { /* Check if we currently have a reference on the object */ obj = idr_find(&filp->object_idr, handle[i]); if (!obj) { ret = -ENOENT; break; } drm_gem_object_get(obj); objs[i] = obj; } spin_unlock(&filp->table_lock); return ret; } /** * drm_gem_objects_lookup - look up GEM objects from an array of handles * @filp: DRM file private date * @bo_handles: user pointer to array of userspace handle * @count: size of handle array * @objs_out: 
returned pointer to array of drm_gem_object pointers * * Takes an array of userspace handles and returns a newly allocated array of * GEM objects. * * For a single handle lookup, use drm_gem_object_lookup(). * * Returns: * @objs filled in with GEM object pointers. Returned GEM objects need to be * released with drm_gem_object_put(). -ENOENT is returned on a lookup * failure. 0 is returned on success. * */ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out) { int ret; u32 *handles; struct drm_gem_object **objs; if (!count) return 0; objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), GFP_KERNEL | __GFP_ZERO); if (!objs) return -ENOMEM; *objs_out = objs; handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); if (!handles) { ret = -ENOMEM; goto out; } if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { ret = -EFAULT; DRM_DEBUG("Failed to copy in GEM handles\n"); goto out; } ret = objects_lookup(filp, handles, count, objs); out: kvfree(handles); return ret; } EXPORT_SYMBOL(drm_gem_objects_lookup); /** * drm_gem_object_lookup - look up a GEM object from its handle * @filp: DRM file private date * @handle: userspace handle * * If looking up an array of handles, use drm_gem_objects_lookup(). * * Returns: * A reference to the object named by the handle if such exists on @filp, NULL * otherwise. */ struct drm_gem_object * drm_gem_object_lookup(struct drm_file *filp, u32 handle) { struct drm_gem_object *obj = NULL; objects_lookup(filp, &handle, 1, &obj); return obj; } EXPORT_SYMBOL(drm_gem_object_lookup); /** * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects * shared and/or exclusive fences. * @filep: DRM file private date * @handle: userspace handle * @wait_all: if true, wait on all fences, else wait on just exclusive fence * @timeout: timeout value in jiffies or zero to return immediately * * Returns: * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than 0 on success. */ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout) { long ret; struct drm_gem_object *obj; obj = drm_gem_object_lookup(filep, handle); if (!obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", handle); return -EINVAL; } ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all), true, timeout); if (ret == 0) ret = -ETIME; else if (ret > 0) ret = 0; drm_gem_object_put(obj); return ret; } EXPORT_SYMBOL(drm_gem_dma_resv_wait); /** * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl * @dev: drm_device * @data: ioctl data * @file_priv: drm file-private structure * * Releases the handle to an mm object. */ int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_gem_close *args = data; int ret; if (!drm_core_check_feature(dev, DRIVER_GEM)) return -EOPNOTSUPP; ret = drm_gem_handle_delete(file_priv, args->handle); return ret; } /** * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl * @dev: drm_device * @data: ioctl data * @file_priv: drm file-private structure * * Create a global name for an object, returning the name. * * Note that the name does not hold a reference; when the object * is freed, the name goes away. 
*/ int drm_gem_flink_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_gem_flink *args = data; struct drm_gem_object *obj; int ret; if (!drm_core_check_feature(dev, DRIVER_GEM)) return -EOPNOTSUPP; obj = drm_gem_object_lookup(file_priv, args->handle); if (obj == NULL) return -ENOENT; mutex_lock(&dev->object_name_lock); /* prevent races with concurrent gem_close. */ if (obj->handle_count == 0) { ret = -ENOENT; goto err; } if (!obj->name) { ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL); if (ret < 0) goto err; obj->name = ret; } args->name = (uint64_t) obj->name; ret = 0; err: mutex_unlock(&dev->object_name_lock); drm_gem_object_put(obj); return ret; } /** * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl * @dev: drm_device * @data: ioctl data * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. * * This handle (of course) holds a reference to the object, so the object * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_gem_open *args = data; struct drm_gem_object *obj; int ret; u32 handle; if (!drm_core_check_feature(dev, DRIVER_GEM)) return -EOPNOTSUPP; mutex_lock(&dev->object_name_lock); obj = idr_find(&dev->object_name_idr, (int) args->name); if (obj) { drm_gem_object_get(obj); } else { mutex_unlock(&dev->object_name_lock); return -ENOENT; } /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); if (ret) goto err; args->handle = handle; args->size = obj->size; err: drm_gem_object_put(obj); return ret; } /** * drm_gem_open - initializes GEM file-private structures at devnode open time * @dev: drm_device which is being opened by userspace * @file_private: drm file-private structure to set up * * Called at device open time, sets up the structure for handling refcounting * of mm objects. */ void drm_gem_open(struct drm_device *dev, struct drm_file *file_private) { idr_init_base(&file_private->object_idr, 1); spin_lock_init(&file_private->table_lock); } /** * drm_gem_release - release file-private GEM resources * @dev: drm_device which is being closed by userspace * @file_private: drm file-private structure to clean up * * Called at close time when the filp is going away. * * Releases any remaining references on objects by this filp. */ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private) { idr_for_each(&file_private->object_idr, &drm_gem_object_release_handle, file_private); idr_destroy(&file_private->object_idr); } /** * drm_gem_object_release - release GEM buffer object resources * @obj: GEM buffer object * * This releases any structures and resources used by @obj and is the inverse of * drm_gem_object_init(). */ void drm_gem_object_release(struct drm_gem_object *obj) { if (obj->filp) fput(obj->filp); drm_gem_private_object_fini(obj); drm_gem_free_mmap_offset(obj); drm_gem_lru_remove(obj); } EXPORT_SYMBOL(drm_gem_object_release); /** * drm_gem_object_free - free a GEM object * @kref: kref of the object to free * * Called after the last reference to the object has been lost. 
* * Frees the object */ void drm_gem_object_free(struct kref *kref) { struct drm_gem_object *obj = container_of(kref, struct drm_gem_object, refcount); if (WARN_ON(!obj->funcs->free)) return; obj->funcs->free(obj); } EXPORT_SYMBOL(drm_gem_object_free); /** * drm_gem_vm_open - vma->ops->open implementation for GEM * @vma: VM area structure * * This function implements the #vm_operations_struct open() callback for GEM * drivers. This must be used together with drm_gem_vm_close(). */ void drm_gem_vm_open(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_get(obj); } EXPORT_SYMBOL(drm_gem_vm_open); /** * drm_gem_vm_close - vma->ops->close implementation for GEM * @vma: VM area structure * * This function implements the #vm_operations_struct close() callback for GEM * drivers. This must be used together with drm_gem_vm_open(). */ void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_put(obj); } EXPORT_SYMBOL(drm_gem_vm_close); /** * drm_gem_mmap_obj - memory map a GEM object * @obj: the GEM object to map * @obj_size: the object size to be mapped, in bytes * @vma: VMA for the area to be mapped * * Set up the VMA to prepare mapping of the GEM object using the GEM object's * vm_ops. Depending on their requirements, GEM objects can either * provide a fault handler in their vm_ops (in which case any accesses to * the object will be trapped, to perform migration, GTT binding, surface * register allocation, or performance monitoring), or mmap the buffer memory * synchronously after calling drm_gem_mmap_obj. * * This function is mainly intended to implement the DMABUF mmap operation, when * the GEM object is not looked up based on its fake offset. To implement the * DRM mmap operation, drivers should use the drm_gem_mmap() function. * * drm_gem_mmap_obj() assumes the user is granted access to the buffer while * drm_gem_mmap() prevents unprivileged users from mapping random objects. So * callers must verify access restrictions before calling this helper. * * Return 0 or success or -EINVAL if the object size is smaller than the VMA * size, or if no vm_ops are provided. */ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma) { int ret; /* Check for valid size. */ if (obj_size < vma->vm_end - vma->vm_start) return -EINVAL; /* Take a ref for this mapping of the object, so that the fault * handler can dereference the mmap offset's pointer to the object. * This reference is cleaned up by the corresponding vm_close * (which should happen whether the vma was created by this call, or * by a vm_open due to mremap or partial unmap or whatever). 
*/ drm_gem_object_get(obj); vma->vm_private_data = obj; vma->vm_ops = obj->funcs->vm_ops; if (obj->funcs->mmap) { ret = obj->funcs->mmap(obj, vma); if (ret) goto err_drm_gem_object_put; WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); } else { if (!vma->vm_ops) { ret = -EINVAL; goto err_drm_gem_object_put; } vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); } return 0; err_drm_gem_object_put: drm_gem_object_put(obj); return ret; } EXPORT_SYMBOL(drm_gem_mmap_obj); /** * drm_gem_mmap - memory map routine for GEM objects * @filp: DRM file pointer * @vma: VMA for the area to be mapped * * If a driver supports GEM object mapping, mmap calls on the DRM file * descriptor will end up here. * * Look up the GEM object based on the offset passed in (vma->vm_pgoff will * contain the fake offset we created when the GTT map ioctl was called on * the object) and map it with a call to drm_gem_mmap_obj(). * * If the caller is not granted access to the buffer object, the mmap will fail * with EACCES. Please see the vma manager for more information. */ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; struct drm_gem_object *obj = NULL; struct drm_vma_offset_node *node; int ret; if (drm_dev_is_unplugged(dev)) return -ENODEV; drm_vma_offset_lock_lookup(dev->vma_offset_manager); node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, vma->vm_pgoff, vma_pages(vma)); if (likely(node)) { obj = container_of(node, struct drm_gem_object, vma_node); /* * When the object is being freed, after it hits 0-refcnt it * proceeds to tear down the object. In the process it will * attempt to remove the VMA offset and so acquire this * mgr->vm_lock. Therefore if we find an object with a 0-refcnt * that matches our range, we know it is in the process of being * destroyed and will be freed as soon as we release the lock - * so we have to check for the 0-refcnted object and treat it as * invalid. 
*/ if (!kref_get_unless_zero(&obj->refcount)) obj = NULL; } drm_vma_offset_unlock_lookup(dev->vma_offset_manager); if (!obj) return -EINVAL; if (!drm_vma_node_is_allowed(node, priv)) { drm_gem_object_put(obj); return -EACCES; } ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); drm_gem_object_put(obj); return ret; } EXPORT_SYMBOL(drm_gem_mmap); void drm_gem_print_info(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *obj) { drm_printf_indent(p, indent, "name=%d\n", obj->name); drm_printf_indent(p, indent, "refcount=%u\n", kref_read(&obj->refcount)); drm_printf_indent(p, indent, "start=%08lx\n", drm_vma_node_start(&obj->vma_node)); drm_printf_indent(p, indent, "size=%zu\n", obj->size); drm_printf_indent(p, indent, "imported=%s\n", str_yes_no(obj->import_attach)); if (obj->funcs->print_info) obj->funcs->print_info(p, indent, obj); } int drm_gem_pin_locked(struct drm_gem_object *obj) { if (obj->funcs->pin) return obj->funcs->pin(obj); return 0; } void drm_gem_unpin_locked(struct drm_gem_object *obj) { if (obj->funcs->unpin) obj->funcs->unpin(obj); } int drm_gem_pin(struct drm_gem_object *obj) { int ret; dma_resv_lock(obj->resv, NULL); ret = drm_gem_pin_locked(obj); dma_resv_unlock(obj->resv); return ret; } void drm_gem_unpin(struct drm_gem_object *obj) { dma_resv_lock(obj->resv, NULL); drm_gem_unpin_locked(obj); dma_resv_unlock(obj->resv); } int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { int ret; dma_resv_assert_held(obj->resv); if (!obj->funcs->vmap) return -EOPNOTSUPP; ret = obj->funcs->vmap(obj, map); if (ret) return ret; else if (iosys_map_is_null(map)) return -ENOMEM; return 0; } EXPORT_SYMBOL(drm_gem_vmap); void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { dma_resv_assert_held(obj->resv); if (iosys_map_is_null(map)) return; if (obj->funcs->vunmap) obj->funcs->vunmap(obj, map); /* Always set the mapping to NULL. Callers may rely on this. */ iosys_map_clear(map); } EXPORT_SYMBOL(drm_gem_vunmap); void drm_gem_lock(struct drm_gem_object *obj) { dma_resv_lock(obj->resv, NULL); } EXPORT_SYMBOL(drm_gem_lock); void drm_gem_unlock(struct drm_gem_object *obj) { dma_resv_unlock(obj->resv); } EXPORT_SYMBOL(drm_gem_unlock); int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) { int ret; dma_resv_lock(obj->resv, NULL); ret = drm_gem_vmap(obj, map); dma_resv_unlock(obj->resv); return ret; } EXPORT_SYMBOL(drm_gem_vmap_unlocked); void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) { dma_resv_lock(obj->resv, NULL); drm_gem_vunmap(obj, map); dma_resv_unlock(obj->resv); } EXPORT_SYMBOL(drm_gem_vunmap_unlocked); /** * drm_gem_lock_reservations - Sets up the ww context and acquires * the lock on an array of GEM objects. * * Once you've locked your reservations, you'll want to set up space * for your shared fences (if applicable), submit your job, then * drm_gem_unlock_reservations(). * * @objs: drm_gem_objects to lock * @count: Number of objects in @objs * @acquire_ctx: struct ww_acquire_ctx that will be initialized as * part of tracking this set of locked reservations. 
*/ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx) { int contended = -1; int i, ret; ww_acquire_init(acquire_ctx, &reservation_ww_class); retry: if (contended != -1) { struct drm_gem_object *obj = objs[contended]; ret = dma_resv_lock_slow_interruptible(obj->resv, acquire_ctx); if (ret) { ww_acquire_fini(acquire_ctx); return ret; } } for (i = 0; i < count; i++) { if (i == contended) continue; ret = dma_resv_lock_interruptible(objs[i]->resv, acquire_ctx); if (ret) { int j; for (j = 0; j < i; j++) dma_resv_unlock(objs[j]->resv); if (contended != -1 && contended >= i) dma_resv_unlock(objs[contended]->resv); if (ret == -EDEADLK) { contended = i; goto retry; } ww_acquire_fini(acquire_ctx); return ret; } } ww_acquire_done(acquire_ctx); return 0; } EXPORT_SYMBOL(drm_gem_lock_reservations); void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx) { int i; for (i = 0; i < count; i++) dma_resv_unlock(objs[i]->resv); ww_acquire_fini(acquire_ctx); } EXPORT_SYMBOL(drm_gem_unlock_reservations); /** * drm_gem_lru_init - initialize a LRU * * @lru: The LRU to initialize * @lock: The lock protecting the LRU */ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock) { lru->lock = lock; lru->count = 0; INIT_LIST_HEAD(&lru->list); } EXPORT_SYMBOL(drm_gem_lru_init); static void drm_gem_lru_remove_locked(struct drm_gem_object *obj) { obj->lru->count -= obj->size >> PAGE_SHIFT; WARN_ON(obj->lru->count < 0); list_del(&obj->lru_node); obj->lru = NULL; } /** * drm_gem_lru_remove - remove object from whatever LRU it is in * * If the object is currently in any LRU, remove it. * * @obj: The GEM object to remove from current LRU */ void drm_gem_lru_remove(struct drm_gem_object *obj) { struct drm_gem_lru *lru = obj->lru; if (!lru) return; mutex_lock(lru->lock); drm_gem_lru_remove_locked(obj); mutex_unlock(lru->lock); } EXPORT_SYMBOL(drm_gem_lru_remove); /** * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU * * Like &drm_gem_lru_move_tail but lru lock must be held * * @lru: The LRU to move the object into. * @obj: The GEM object to move into this LRU */ void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj) { lockdep_assert_held_once(lru->lock); if (obj->lru) drm_gem_lru_remove_locked(obj); lru->count += obj->size >> PAGE_SHIFT; list_add_tail(&obj->lru_node, &lru->list); obj->lru = lru; } EXPORT_SYMBOL(drm_gem_lru_move_tail_locked); /** * drm_gem_lru_move_tail - move the object to the tail of the LRU * * If the object is already in this LRU it will be moved to the * tail. Otherwise it will be removed from whichever other LRU * it is in (if any) and moved into this LRU. * * @lru: The LRU to move the object into. * @obj: The GEM object to move into this LRU */ void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj) { mutex_lock(lru->lock); drm_gem_lru_move_tail_locked(lru, obj); mutex_unlock(lru->lock); } EXPORT_SYMBOL(drm_gem_lru_move_tail); /** * drm_gem_lru_scan - helper to implement shrinker.scan_objects * * If the shrink callback succeeds, it is expected that the driver * move the object out of this LRU. * * If the LRU possibly contain active buffers, it is the responsibility * of the shrink callback to check for this (ie. dma_resv_test_signaled()) * or if necessary block until the buffer becomes idle. 
* * @lru: The LRU to scan * @nr_to_scan: The number of pages to try to reclaim * @remaining: The number of pages left to reclaim, should be initialized by caller * @shrink: Callback to try to shrink/reclaim the object. */ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned int nr_to_scan, unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj)) { struct drm_gem_lru still_in_lru; struct drm_gem_object *obj; unsigned freed = 0; drm_gem_lru_init(&still_in_lru, lru->lock); mutex_lock(lru->lock); while (freed < nr_to_scan) { obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node); if (!obj) break; drm_gem_lru_move_tail_locked(&still_in_lru, obj); /* * If it's in the process of being freed, gem_object->free() * may be blocked on lock waiting to remove it. So just * skip it. */ if (!kref_get_unless_zero(&obj->refcount)) continue; /* * Now that we own a reference, we can drop the lock for the * rest of the loop body, to reduce contention with other * code paths that need the LRU lock */ mutex_unlock(lru->lock); /* * Note that this still needs to be trylock, since we can * hit shrinker in response to trying to get backing pages * for this obj (ie. while it's lock is already held) */ if (!dma_resv_trylock(obj->resv)) { *remaining += obj->size >> PAGE_SHIFT; goto tail; } if (shrink(obj)) { freed += obj->size >> PAGE_SHIFT; /* * If we succeeded in releasing the object's backing * pages, we expect the driver to have moved the object * out of this LRU */ WARN_ON(obj->lru == &still_in_lru); WARN_ON(obj->lru == lru); } dma_resv_unlock(obj->resv); tail: drm_gem_object_put(obj); mutex_lock(lru->lock); } /* * Move objects we've skipped over out of the temporary still_in_lru * back into this LRU */ list_for_each_entry (obj, &still_in_lru.list, lru_node) obj->lru = lru; list_splice_tail(&still_in_lru.list, &lru->list); lru->count += still_in_lru.count; mutex_unlock(lru->lock); return freed; } EXPORT_SYMBOL(drm_gem_lru_scan); /** * drm_gem_evict - helper to evict backing pages for a GEM object * @obj: obj in question */ int drm_gem_evict(struct drm_gem_object *obj) { dma_resv_assert_held(obj->resv); if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)) return -EBUSY; if (obj->funcs->evict) return obj->funcs->evict(obj); return 0; } EXPORT_SYMBOL(drm_gem_evict);
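The kernel-doc for drm_gem_lock_reservations() above spells out the intended call pattern: lock the reservation objects of every buffer in a submission, do the fence bookkeeping and push the job, then call drm_gem_unlock_reservations(). A minimal sketch of that pattern follows; struct my_job, its bos/bo_count fields and my_driver_push_job() are hypothetical driver pieces, only the two drm_gem_*_reservations() calls come from the code above.

/*
 * Sketch only: typical submission-path use of
 * drm_gem_lock_reservations() / drm_gem_unlock_reservations().
 */
static int my_driver_submit(struct my_job *job)
{
	struct ww_acquire_ctx ctx;
	int ret;

	/* Deadlock-free locking of all reservation objects in the job. */
	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
	if (ret)
		return ret;

	/*
	 * With every reservation held: reserve fence slots, attach the
	 * job's fence to each object, and hand the job to the hardware.
	 */
	ret = my_driver_push_job(job);

	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);

	return ret;
}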
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/module.h> #include <linux/netdevice.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/usb/r8152.h> #define OCP_BASE 0xe86c static int pla_read_word(struct usbnet *dev, u16 index) { u16 byen = BYTE_EN_WORD; u8 shift = index & 2; __le32 tmp; int ret; if (shift) byen <<= shift; index &= ~3; ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index, MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); if (ret < 0) goto out; ret = __le32_to_cpu(tmp); ret >>= (shift * 8); ret &= 0xffff; out: return ret; } static int pla_write_word(struct usbnet *dev, u16 index, u32 data) { u32 mask = 0xffff; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; __le32 tmp; int ret; data &= mask; if (shift) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); } index &= ~3; ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index, MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); if (ret < 0) goto out; data |= __le32_to_cpu(tmp) & ~mask; tmp = __cpu_to_le32(data); ret = usbnet_write_cmd(dev, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, index, MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); out: return ret; } static int r8153_ecm_mdio_read(struct net_device *netdev, int phy_id, int reg) { struct usbnet *dev = netdev_priv(netdev); int ret; ret = pla_write_word(dev, OCP_BASE, 0xa000); if (ret < 0) goto out; ret = pla_read_word(dev, 0xb400 + reg * 2); out: return ret; } static void r8153_ecm_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) { struct usbnet *dev = netdev_priv(netdev); int ret; ret = pla_write_word(dev, OCP_BASE, 0xa000); if (ret < 0) return; ret = pla_write_word(dev, 0xb400 + reg * 2, val); } static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) { int status; status = usbnet_cdc_bind(dev, intf); if (status < 0) return status; dev->mii.dev = dev->net; dev->mii.mdio_read = r8153_ecm_mdio_read; dev->mii.mdio_write = r8153_ecm_mdio_write; dev->mii.reg_num_mask = 0x1f; dev->mii.supports_gmii = 1; return status; } static const struct driver_info r8153_info = { .description = "RTL8153 ECM Device", .flags = FLAG_ETHER, .bind = r8153_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, .manage_power = usbnet_manage_power, }; static const struct usb_device_id products[] = { /* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */ { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&r8153_info, }, /* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&r8153_info, }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static int rtl8153_ecm_probe(struct usb_interface *intf, const struct
usb_device_id *id) { #if IS_REACHABLE(CONFIG_USB_RTL8152) if (rtl8152_get_version(intf)) return -ENODEV; #endif return usbnet_probe(intf, id); } static struct usb_driver r8153_ecm_driver = { .name = "r8153_ecm", .id_table = products, .probe = rtl8153_ecm_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .reset_resume = usbnet_resume, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(r8153_ecm_driver); MODULE_AUTHOR("Hayes Wang"); MODULE_DESCRIPTION("Realtek USB ECM device"); MODULE_LICENSE("GPL");
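The MII hooks installed in r8153_bind() are what generic helpers such as mii_link_ok() call back into: each PHY register N is reached by first pointing OCP_BASE at 0xa000 and then accessing PLA address 0xb400 + N * 2, and pla_read_word()/pla_write_word() fold that 16-bit access into a 32-bit vendor transfer, using index & 2 to select the upper or lower word and shifting the byte-enable mask and data to match. The following is a hedged sketch, not part of the driver: the helper name is hypothetical, while the mii_if_info.mdio_read field and the MII_PHYSID* registers are the standard ones from <linux/mii.h> (the hooks above ignore phy_id).

#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/usb/usbnet.h>

/* Hypothetical helper: read the PHY ID via the hooks set up in r8153_bind(). */
static void r8153_ecm_show_phy_id(struct net_device *netdev)
{
	struct usbnet *dev = netdev_priv(netdev);
	int id1, id2;

	/*
	 * MII_PHYSID1 is register 2 (PLA 0xb404, low word of the dword);
	 * MII_PHYSID2 is register 3 (PLA 0xb406, so pla_read_word() shifts
	 * the byte enables and the data to the upper 16 bits).
	 */
	id1 = dev->mii.mdio_read(netdev, dev->mii.phy_id, MII_PHYSID1);
	id2 = dev->mii.mdio_read(netdev, dev->mii.phy_id, MII_PHYSID2);
	if (id1 >= 0 && id2 >= 0)
		netdev_info(netdev, "PHY ID %04x:%04x\n", id1, id2);
}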
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/namei.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S.
Miller (davem@caip.rutgers.edu), 1995 * Directory entry file type support and forward compatibility hooks * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 * Hash Tree Directory indexing (c) * Daniel Phillips, 2001 * Hash Tree Directory indexing porting * Christopher Li, 2002 * Hash Tree Directory indexing cleanup * Theodore Ts'o, 2002 */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/time.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/bio.h> #include <linux/iversion.h> #include <linux/unicode.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include <trace/events/ext4.h> /* * define how far ahead to read directories while searching them. */ #define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block) { struct ext4_map_blocks map; struct buffer_head *bh; int err; if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && ((inode->i_size >> 10) >= EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) return ERR_PTR(-ENOSPC); *block = inode->i_size >> inode->i_sb->s_blocksize_bits; map.m_lblk = *block; map.m_len = 1; /* * We're appending new directory block. Make sure the block is not * allocated yet, otherwise we will end up corrupting the * directory. */ err = ext4_map_blocks(NULL, inode, &map, 0); if (err < 0) return ERR_PTR(err); if (err) { EXT4_ERROR_INODE(inode, "Logical block already allocated"); return ERR_PTR(-EFSCORRUPTED); } bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE); if (IS_ERR(bh)) return bh; inode->i_size += inode->i_sb->s_blocksize; EXT4_I(inode)->i_disksize = inode->i_size; err = ext4_mark_inode_dirty(handle, inode); if (err) goto out; BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode->i_sb, bh, EXT4_JTR_NONE); if (err) goto out; return bh; out: brelse(bh); ext4_std_error(inode->i_sb, err); return ERR_PTR(err); } static int ext4_dx_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent); /* * Hints to ext4_read_dirblock regarding whether we expect a directory * block being read to be an index block, or a block containing * directory entries (and if the latter, whether it was found via a * logical block in an htree index block). This is used to control * what sort of sanity checkinig ext4_read_dirblock() will do on the * directory block read from the storage device. EITHER will means * the caller doesn't know what kind of directory block will be read, * so no specific verification will be done. 
*/ typedef enum { EITHER, INDEX, DIRENT, DIRENT_HTREE } dirblock_type_t; #define ext4_read_dirblock(inode, block, type) \ __ext4_read_dirblock((inode), (block), (type), __func__, __LINE__) static struct buffer_head *__ext4_read_dirblock(struct inode *inode, ext4_lblk_t block, dirblock_type_t type, const char *func, unsigned int line) { struct buffer_head *bh; struct ext4_dir_entry *dirent; int is_dx_block = 0; if (block >= inode->i_size >> inode->i_blkbits) { ext4_error_inode(inode, func, line, block, "Attempting to read directory block (%u) that is past i_size (%llu)", block, inode->i_size); return ERR_PTR(-EFSCORRUPTED); } if (ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_EIO)) bh = ERR_PTR(-EIO); else bh = ext4_bread(NULL, inode, block, 0); if (IS_ERR(bh)) { __ext4_warning(inode->i_sb, func, line, "inode #%lu: lblock %lu: comm %s: " "error %ld reading directory block", inode->i_ino, (unsigned long)block, current->comm, PTR_ERR(bh)); return bh; } /* The first directory block must not be a hole. */ if (!bh && (type == INDEX || type == DIRENT_HTREE || block == 0)) { ext4_error_inode(inode, func, line, block, "Directory hole found for htree %s block %u", (type == INDEX) ? "index" : "leaf", block); return ERR_PTR(-EFSCORRUPTED); } if (!bh) return NULL; dirent = (struct ext4_dir_entry *) bh->b_data; /* Determine whether or not we have an index block */ if (is_dx(inode)) { if (block == 0) is_dx_block = 1; else if (ext4_rec_len_from_disk(dirent->rec_len, inode->i_sb->s_blocksize) == inode->i_sb->s_blocksize) is_dx_block = 1; } if (!is_dx_block && type == INDEX) { ext4_error_inode(inode, func, line, block, "directory leaf block found instead of index block"); brelse(bh); return ERR_PTR(-EFSCORRUPTED); } if (!ext4_has_metadata_csum(inode->i_sb) || buffer_verified(bh)) return bh; /* * An empty leaf block can get mistaken for a index block; for * this reason, we can only check the index checksum when the * caller is sure it should be an index block. */ if (is_dx_block && type == INDEX) { if (ext4_dx_csum_verify(inode, dirent) && !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC)) set_buffer_verified(bh); else { ext4_error_inode_err(inode, func, line, block, EFSBADCRC, "Directory index failed checksum"); brelse(bh); return ERR_PTR(-EFSBADCRC); } } if (!is_dx_block) { if (ext4_dirblock_csum_verify(inode, bh) && !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC)) set_buffer_verified(bh); else { ext4_error_inode_err(inode, func, line, block, EFSBADCRC, "Directory block failed checksum"); brelse(bh); return ERR_PTR(-EFSBADCRC); } } return bh; } #ifdef DX_DEBUG #define dxtrace(command) command #else #define dxtrace(command) #endif struct fake_dirent { __le32 inode; __le16 rec_len; u8 name_len; u8 file_type; }; struct dx_countlimit { __le16 limit; __le16 count; }; struct dx_entry { __le32 hash; __le32 block; }; /* * dx_root_info is laid out so that if it should somehow get overlaid by a * dirent the two low bits of the hash version will be zero. Therefore, the * hash version mod 4 should never be 0. Sincerely, the paranoia department. 
*/ struct dx_root { struct fake_dirent dot; char dot_name[4]; struct fake_dirent dotdot; char dotdot_name[4]; struct dx_root_info { __le32 reserved_zero; u8 hash_version; u8 info_length; /* 8 */ u8 indirect_levels; u8 unused_flags; } info; struct dx_entry entries[]; }; struct dx_node { struct fake_dirent fake; struct dx_entry entries[]; }; struct dx_frame { struct buffer_head *bh; struct dx_entry *entries; struct dx_entry *at; }; struct dx_map_entry { u32 hash; u16 offs; u16 size; }; /* * This goes at the end of each htree block. */ struct dx_tail { u32 dt_reserved; __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */ }; static inline ext4_lblk_t dx_get_block(struct dx_entry *entry); static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value); static inline unsigned dx_get_hash(struct dx_entry *entry); static void dx_set_hash(struct dx_entry *entry, unsigned value); static unsigned dx_get_count(struct dx_entry *entries); static unsigned dx_get_limit(struct dx_entry *entries); static void dx_set_count(struct dx_entry *entries, unsigned value); static void dx_set_limit(struct dx_entry *entries, unsigned value); static unsigned dx_root_limit(struct inode *dir, unsigned infosize); static unsigned dx_node_limit(struct inode *dir); static struct dx_frame *dx_probe(struct ext4_filename *fname, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame); static void dx_release(struct dx_frame *frames); static int dx_make_map(struct inode *dir, struct buffer_head *bh, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail); static void dx_sort_map(struct dx_map_entry *map, unsigned count); static struct ext4_dir_entry_2 *dx_move_dirents(struct inode *dir, char *from, char *to, struct dx_map_entry *offsets, int count, unsigned int blocksize); static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base, unsigned int blocksize); static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block); static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash); static struct buffer_head * ext4_dx_find_entry(struct inode *dir, struct ext4_filename *fname, struct ext4_dir_entry_2 **res_dir); static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode); /* checksumming functions */ void ext4_initialize_dirent_tail(struct buffer_head *bh, unsigned int blocksize) { struct ext4_dir_entry_tail *t = EXT4_DIRENT_TAIL(bh->b_data, blocksize); memset(t, 0, sizeof(struct ext4_dir_entry_tail)); t->det_rec_len = ext4_rec_len_to_disk( sizeof(struct ext4_dir_entry_tail), blocksize); t->det_reserved_ft = EXT4_FT_DIR_CSUM; } /* Walk through a dirent block to find a checksum "dirent" at the tail */ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, struct buffer_head *bh) { struct ext4_dir_entry_tail *t; int blocksize = EXT4_BLOCK_SIZE(inode->i_sb); #ifdef PARANOID struct ext4_dir_entry *d, *top; d = (struct ext4_dir_entry *)bh->b_data; top = (struct ext4_dir_entry *)(bh->b_data + (blocksize - sizeof(struct ext4_dir_entry_tail))); while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize)) d = (struct ext4_dir_entry *)(((void *)d) + ext4_rec_len_from_disk(d->rec_len, blocksize)); if (d != top) return NULL; t = (struct ext4_dir_entry_tail *)d; #else t = EXT4_DIRENT_TAIL(bh->b_data, EXT4_BLOCK_SIZE(inode->i_sb)); #endif if (t->det_reserved_zero1 || (ext4_rec_len_from_disk(t->det_rec_len, blocksize) != sizeof(struct 
ext4_dir_entry_tail)) || t->det_reserved_zero2 || t->det_reserved_ft != EXT4_FT_DIR_CSUM) return NULL; return t; } static __le32 ext4_dirblock_csum(struct inode *inode, void *dirent, int size) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); return cpu_to_le32(csum); } #define warn_no_space_for_csum(inode) \ __warn_no_space_for_csum((inode), __func__, __LINE__) static void __warn_no_space_for_csum(struct inode *inode, const char *func, unsigned int line) { __ext4_warning_inode(inode, func, line, "No space for directory leaf checksum. Please run e2fsck -D."); } int ext4_dirblock_csum_verify(struct inode *inode, struct buffer_head *bh) { struct ext4_dir_entry_tail *t; if (!ext4_has_metadata_csum(inode->i_sb)) return 1; t = get_dirent_tail(inode, bh); if (!t) { warn_no_space_for_csum(inode); return 0; } if (t->det_checksum != ext4_dirblock_csum(inode, bh->b_data, (char *)t - bh->b_data)) return 0; return 1; } static void ext4_dirblock_csum_set(struct inode *inode, struct buffer_head *bh) { struct ext4_dir_entry_tail *t; if (!ext4_has_metadata_csum(inode->i_sb)) return; t = get_dirent_tail(inode, bh); if (!t) { warn_no_space_for_csum(inode); return; } t->det_checksum = ext4_dirblock_csum(inode, bh->b_data, (char *)t - bh->b_data); } int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dirblock_csum_set(inode, bh); return ext4_handle_dirty_metadata(handle, inode, bh); } static struct dx_countlimit *get_dx_countlimit(struct inode *inode, struct ext4_dir_entry *dirent, int *offset) { struct ext4_dir_entry *dp; struct dx_root_info *root; int count_offset; int blocksize = EXT4_BLOCK_SIZE(inode->i_sb); unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize); if (rlen == blocksize) count_offset = 8; else if (rlen == 12) { dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12) return NULL; root = (struct dx_root_info *)(((void *)dp + 12)); if (root->reserved_zero || root->info_length != sizeof(struct dx_root_info)) return NULL; count_offset = 32; } else return NULL; if (offset) *offset = count_offset; return (struct dx_countlimit *)(((void *)dirent) + count_offset); } static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, int count_offset, int count, struct dx_tail *t) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum; int size; __u32 dummy_csum = 0; int offset = offsetof(struct dx_tail, dt_checksum); size = count_offset + (count * sizeof(struct dx_entry)); csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); csum = ext4_chksum(sbi, csum, (__u8 *)t, offset); csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); return cpu_to_le32(csum); } static int ext4_dx_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!ext4_has_metadata_csum(inode->i_sb)) return 1; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? 
Run e2fsck -D."); return 0; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return 0; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset, count, t)) return 0; return 1; } static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!ext4_has_metadata_csum(inode->i_sb)) return; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); return; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t); } static inline int ext4_handle_dirty_dx_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } /* * p is at least 6 bytes before the end of page */ static inline struct ext4_dir_entry_2 * ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize) { return (struct ext4_dir_entry_2 *)((char *)p + ext4_rec_len_from_disk(p->rec_len, blocksize)); } /* * Future: use high four bits of block for coalesce-on-delete flags * Mask them off for now. */ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry) { return le32_to_cpu(entry->block) & 0x0fffffff; } static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value) { entry->block = cpu_to_le32(value); } static inline unsigned dx_get_hash(struct dx_entry *entry) { return le32_to_cpu(entry->hash); } static inline void dx_set_hash(struct dx_entry *entry, unsigned value) { entry->hash = cpu_to_le32(value); } static inline unsigned dx_get_count(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->count); } static inline unsigned dx_get_limit(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->limit); } static inline void dx_set_count(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); } static inline void dx_set_limit(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); } static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) { unsigned int entry_space = dir->i_sb->s_blocksize - ext4_dir_rec_len(1, NULL) - ext4_dir_rec_len(2, NULL) - infosize; if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } static inline unsigned dx_node_limit(struct inode *dir) { unsigned int entry_space = dir->i_sb->s_blocksize - ext4_dir_rec_len(0, dir); if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } /* * Debug */ #ifdef DX_DEBUG static void dx_show_index(char * label, struct dx_entry *entries) { int i, n = dx_get_count (entries); printk(KERN_DEBUG "%s index", label); for (i = 0; i < n; i++) { printk(KERN_CONT " %x->%lu", i ? 
dx_get_hash(entries + i) : 0, (unsigned long)dx_get_block(entries + i)); } printk(KERN_CONT "\n"); } struct stats { unsigned names; unsigned space; unsigned bcount; }; static struct stats dx_show_leaf(struct inode *dir, struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de, int size, int show_names) { unsigned names = 0, space = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; printk("names: "); while ((char *) de < base + size) { if (de->inode) { if (show_names) { #ifdef CONFIG_FS_ENCRYPTION int len; char *name; struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0); int res = 0; name = de->name; len = de->name_len; if (!IS_ENCRYPTED(dir)) { /* Directory is not encrypted */ (void) ext4fs_dirhash(dir, de->name, de->name_len, &h); printk("%*.s:(U)%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); } else { struct fscrypt_str de_name = FSTR_INIT(name, len); /* Directory is encrypted */ res = fscrypt_fname_alloc_buffer( len, &fname_crypto_str); if (res) printk(KERN_WARNING "Error " "allocating crypto " "buffer--skipping " "crypto\n"); res = fscrypt_fname_disk_to_usr(dir, 0, 0, &de_name, &fname_crypto_str); if (res) { printk(KERN_WARNING "Error " "converting filename " "from disk to usr" "\n"); name = "??"; len = 2; } else { name = fname_crypto_str.name; len = fname_crypto_str.len; } if (IS_CASEFOLDED(dir)) h.hash = EXT4_DIRENT_HASH(de); else (void) ext4fs_dirhash(dir, de->name, de->name_len, &h); printk("%*.s:(E)%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); fscrypt_fname_free_buffer( &fname_crypto_str); } #else int len = de->name_len; char *name = de->name; (void) ext4fs_dirhash(dir, de->name, de->name_len, &h); printk("%*.s:%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); #endif } space += ext4_dir_rec_len(de->name_len, dir); names++; } de = ext4_next_entry(de, size); } printk(KERN_CONT "(%i)\n", names); return (struct stats) { names, space, 1 }; } struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, struct dx_entry *entries, int levels) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count = dx_get_count(entries), names = 0, space = 0, i; unsigned bcount = 0; struct buffer_head *bh; printk("%i indexed blocks...\n", count); for (i = 0; i < count; i++, entries++) { ext4_lblk_t block = dx_get_block(entries); ext4_lblk_t hash = i ? dx_get_hash(entries): 0; u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; struct stats stats; printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); bh = ext4_bread(NULL,dir, block, 0); if (!bh || IS_ERR(bh)) continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); names += stats.names; space += stats.space; bcount += stats.bcount; brelse(bh); } if (bcount) printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", levels ? "" : " ", names, space/bcount, (space/bcount)*100/blocksize); return (struct stats) { names, space, bcount}; } /* * Linear search cross check */ static inline void htree_rep_invariant_check(struct dx_entry *at, struct dx_entry *target, u32 hash, unsigned int n) { while (n--) { dxtrace(printk(KERN_CONT ",")); if (dx_get_hash(++at) > hash) { at--; break; } } ASSERT(at == target - 1); } #else /* DX_DEBUG */ static inline void htree_rep_invariant_check(struct dx_entry *at, struct dx_entry *target, u32 hash, unsigned int n) { } #endif /* DX_DEBUG */ /* * Probe for a directory leaf block to search. 
* * dx_probe can return ERR_BAD_DX_DIR, which means there was a format * error in the directory index, and the caller should fall back to * searching the directory normally. The callers of dx_probe **MUST** * check for this error code, and make sure it never gets reflected * back to userspace. */ static struct dx_frame * dx_probe(struct ext4_filename *fname, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in) { unsigned count, indirect, level, i; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct dx_frame *frame = frame_in; struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR); u32 hash; ext4_lblk_t block; ext4_lblk_t blocks[EXT4_HTREE_LEVEL]; memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0])); frame->bh = ext4_read_dirblock(dir, 0, INDEX); if (IS_ERR(frame->bh)) return (struct dx_frame *) frame->bh; root = (struct dx_root *) frame->bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY && root->info.hash_version != DX_HASH_SIPHASH) { ext4_warning_inode(dir, "Unrecognised inode hash code %u", root->info.hash_version); goto fail; } if (ext4_hash_in_dirent(dir)) { if (root->info.hash_version != DX_HASH_SIPHASH) { ext4_warning_inode(dir, "Hash in dirent, but hash is not SIPHASH"); goto fail; } } else { if (root->info.hash_version == DX_HASH_SIPHASH) { ext4_warning_inode(dir, "Hash code is SIPHASH, but hash not in dirent"); goto fail; } } if (fname) hinfo = &fname->hinfo; hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; /* hash is already computed for encrypted casefolded directory */ if (fname && fname_name(fname) && !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) { int ret = ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo); if (ret < 0) { ret_err = ERR_PTR(ret); goto fail; } } hash = hinfo->hash; if (root->info.unused_flags & 1) { ext4_warning_inode(dir, "Unimplemented hash flags: %#06x", root->info.unused_flags); goto fail; } indirect = root->info.indirect_levels; if (indirect >= ext4_dir_htree_level(dir->i_sb)) { ext4_warning(dir->i_sb, "Directory (ino: %lu) htree depth %#06x exceed" "supported value", dir->i_ino, ext4_dir_htree_level(dir->i_sb)); if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) { ext4_warning(dir->i_sb, "Enable large directory " "feature to access it"); } goto fail; } entries = (struct dx_entry *)(((char *)&root->info) + root->info.info_length); if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext4_warning_inode(dir, "dx entry: limit %u != root limit %u", dx_get_limit(entries), dx_root_limit(dir, root->info.info_length)); goto fail; } dxtrace(printk("Look up %x", hash)); level = 0; blocks[0] = 0; while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext4_warning_inode(dir, "dx entry: count %u beyond limit %u", count, dx_get_limit(entries)); goto fail; } p = entries + 1; q = entries + count - 1; while (p <= q) { m = p + (q - p) / 2; dxtrace(printk(KERN_CONT ".")); if (dx_get_hash(m) > hash) q = m - 1; else p = m + 1; } htree_rep_invariant_check(entries, p, hash, count - 1); at = p - 1; dxtrace(printk(KERN_CONT " %x->%u\n", at == entries ? 
0 : dx_get_hash(at), dx_get_block(at))); frame->entries = entries; frame->at = at; block = dx_get_block(at); for (i = 0; i <= level; i++) { if (blocks[i] == block) { ext4_warning_inode(dir, "dx entry: tree cycle block %u points back to block %u", blocks[level], block); goto fail; } } if (++level > indirect) return frame; blocks[level] = block; frame++; frame->bh = ext4_read_dirblock(dir, block, INDEX); if (IS_ERR(frame->bh)) { ret_err = (struct dx_frame *) frame->bh; frame->bh = NULL; goto fail; } entries = ((struct dx_node *) frame->bh->b_data)->entries; if (dx_get_limit(entries) != dx_node_limit(dir)) { ext4_warning_inode(dir, "dx entry: limit %u != node limit %u", dx_get_limit(entries), dx_node_limit(dir)); goto fail; } } fail: while (frame >= frame_in) { brelse(frame->bh); frame--; } if (ret_err == ERR_PTR(ERR_BAD_DX_DIR)) ext4_warning_inode(dir, "Corrupt directory, running e2fsck is recommended"); return ret_err; } static void dx_release(struct dx_frame *frames) { struct dx_root_info *info; int i; unsigned int indirect_levels; if (frames[0].bh == NULL) return; info = &((struct dx_root *)frames[0].bh->b_data)->info; /* save local copy, "info" may be freed after brelse() */ indirect_levels = info->indirect_levels; for (i = 0; i <= indirect_levels; i++) { if (frames[i].bh == NULL) break; brelse(frames[i].bh); frames[i].bh = NULL; } } /* * This function increments the frame pointer to search the next leaf * block, and reads in the necessary intervening nodes if the search * should be necessary. Whether or not the search is necessary is * controlled by the hash parameter. If the hash value is even, then * the search is only continued if the next block starts with that * hash value. This is used if we are searching for a specific file. * * If the hash value is HASH_NB_ALWAYS, then always go to the next block. * * This function returns 1 if the caller should continue to search, * or 0 if it should not. If there is an error reading one of the * index blocks, it will a negative error code. * * If start_hash is non-null, it will be filled in with the starting * hash of the next page. */ static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash) { struct dx_frame *p; struct buffer_head *bh; int num_frames = 0; __u32 bhash; p = frame; /* * Find the next leaf page by incrementing the frame pointer. * If we run out of entries in the interior node, loop around and * increment pointer in the parent node. When we break out of * this loop, num_frames indicates the number of interior * nodes need to be read. */ while (1) { if (++(p->at) < p->entries + dx_get_count(p->entries)) break; if (p == frames) return 0; num_frames++; p--; } /* * If the hash is 1, then continue only if the next page has a * continuation hash of any value. This is used for readdir * handling. Otherwise, check to see if the hash matches the * desired continuation hash. If it doesn't, return since * there's no point to read in the successive index pages. 
*/ bhash = dx_get_hash(p->at); if (start_hash) *start_hash = bhash; if ((hash & 1) == 0) { if ((bhash & ~1) != hash) return 0; } /* * If the hash is HASH_NB_ALWAYS, we always go to the next * block so no check is necessary */ while (num_frames--) { bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX); if (IS_ERR(bh)) return PTR_ERR(bh); p++; brelse(p->bh); p->bh = bh; p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; } return 1; } /* * This function fills a red-black tree with information from a * directory block. It returns the number directory entries loaded * into the tree. If there is an error it is returned in err. */ static int htree_dirblock_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, struct dx_hash_info *hinfo, __u32 start_hash, __u32 start_minor_hash) { struct buffer_head *bh; struct ext4_dir_entry_2 *de, *top; int err = 0, count = 0; struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str; int csum = ext4_has_metadata_csum(dir->i_sb); dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", (unsigned long)block)); bh = ext4_read_dirblock(dir, block, DIRENT_HTREE); if (IS_ERR(bh)) return PTR_ERR(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; /* csum entries are not larger in the casefolded encrypted case */ top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - ext4_dir_rec_len(0, csum ? NULL : dir)); /* Check if the directory is encrypted */ if (IS_ENCRYPTED(dir)) { err = fscrypt_prepare_readdir(dir); if (err < 0) { brelse(bh); return err; } err = fscrypt_fname_alloc_buffer(EXT4_NAME_LEN, &fname_crypto_str); if (err < 0) { brelse(bh); return err; } } for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + ((char *)de - bh->b_data))) { /* silently ignore the rest of the block */ break; } if (ext4_hash_in_dirent(dir)) { if (de->name_len && de->inode) { hinfo->hash = EXT4_DIRENT_HASH(de); hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de); } else { hinfo->hash = 0; hinfo->minor_hash = 0; } } else { err = ext4fs_dirhash(dir, de->name, de->name_len, hinfo); if (err < 0) { count = err; goto errout; } } if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) continue; if (de->inode == 0) continue; if (!IS_ENCRYPTED(dir)) { tmp_str.name = de->name; tmp_str.len = de->name_len; err = ext4_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de, &tmp_str); } else { int save_len = fname_crypto_str.len; struct fscrypt_str de_name = FSTR_INIT(de->name, de->name_len); /* Directory is encrypted */ err = fscrypt_fname_disk_to_usr(dir, hinfo->hash, hinfo->minor_hash, &de_name, &fname_crypto_str); if (err) { count = err; goto errout; } err = ext4_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de, &fname_crypto_str); fname_crypto_str.len = save_len; } if (err != 0) { count = err; goto errout; } count++; } errout: brelse(bh); fscrypt_fname_free_buffer(&fname_crypto_str); return count; } /* * This function fills a red-black tree with information from a * directory. We start scanning the directory in hash order, starting * at start_hash and start_minor_hash. * * This function returns the number of entries inserted into the tree, * or a negative error code. 
*/ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash) { struct dx_hash_info hinfo; struct ext4_dir_entry_2 *de; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct inode *dir; ext4_lblk_t block; int count = 0; int ret, err; __u32 hashval; struct fscrypt_str tmp_str; dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = file_inode(dir_file); if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { if (ext4_hash_in_dirent(dir)) hinfo.hash_version = DX_HASH_SIPHASH; else hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; count = ext4_inlinedir_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash, &has_inline_data); if (has_inline_data) { *next_hash = ~0; return count; } } count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash); *next_hash = ~0; return count; } hinfo.hash = start_hash; hinfo.minor_hash = 0; frame = dx_probe(NULL, dir, &hinfo, frames); if (IS_ERR(frame)) return PTR_ERR(frame); /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; tmp_str.name = de->name; tmp_str.len = de->name_len; err = ext4_htree_store_dirent(dir_file, 0, 0, de, &tmp_str); if (err != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; de = ext4_next_entry(de, dir->i_sb->s_blocksize); tmp_str.name = de->name; tmp_str.len = de->name_len; err = ext4_htree_store_dirent(dir_file, 2, 0, de, &tmp_str); if (err != 0) goto errout; count++; } while (1) { if (fatal_signal_pending(current)) { err = -ERESTARTSYS; goto errout; } cond_resched(); block = dx_get_block(frame->at); ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, start_hash, start_minor_hash); if (ret < 0) { err = ret; goto errout; } count += ret; hashval = ~0; ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS, frame, frames, &hashval); *next_hash = hashval; if (ret < 0) { err = ret; goto errout; } /* * Stop if: (a) there are no more entries, or * (b) we have inserted at least one entry and the * next hash value is not a continuation */ if ((ret == 0) || (count && ((hashval & 1) == 0))) break; } dx_release(frames); dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, " "next hash: %x\n", count, *next_hash)); return count; errout: dx_release(frames); return (err); } static inline int search_dirblock(struct buffer_head *bh, struct inode *dir, struct ext4_filename *fname, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, fname, offset, res_dir); } /* * Directory block splitting, compacting */ /* * Create map of hash values, offsets, and sizes, stored at end of block. * Returns number of entries mapped. 
*/ static int dx_make_map(struct inode *dir, struct buffer_head *bh, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) { int count = 0; struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)bh->b_data; unsigned int buflen = bh->b_size; char *base = bh->b_data; struct dx_hash_info h = *hinfo; int blocksize = EXT4_BLOCK_SIZE(dir->i_sb); if (ext4_has_metadata_csum(dir->i_sb)) buflen -= sizeof(struct ext4_dir_entry_tail); while ((char *) de < base + buflen) { if (ext4_check_dir_entry(dir, NULL, de, bh, base, buflen, ((char *)de) - base)) return -EFSCORRUPTED; if (de->name_len && de->inode) { if (ext4_hash_in_dirent(dir)) h.hash = EXT4_DIRENT_HASH(de); else { int err = ext4fs_dirhash(dir, de->name, de->name_len, &h); if (err < 0) return err; } map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; map_tail->size = ext4_rec_len_from_disk(de->rec_len, blocksize); count++; cond_resched(); } de = ext4_next_entry(de, blocksize); } return count; } /* Sort map by hash value */ static void dx_sort_map (struct dx_map_entry *map, unsigned count) { struct dx_map_entry *p, *q, *top = map + count - 1; int more; /* Combsort until bubble sort doesn't suck */ while (count > 2) { count = count*10/13; if (count - 9 < 2) /* 9, 10 -> 11 */ count = 11; for (p = top, q = p - count; q >= map; p--, q--) if (p->hash < q->hash) swap(*p, *q); } /* Garden variety bubble sort */ do { more = 0; q = top; while (q-- > map) { if (q[1].hash >= q[0].hash) continue; swap(*(q+1), *q); more = 1; } } while(more); } static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) { struct dx_entry *entries = frame->entries; struct dx_entry *old = frame->at, *new = old + 1; int count = dx_get_count(entries); ASSERT(count < dx_get_limit(entries)); ASSERT(old < entries + count); memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); dx_set_hash(new, hash); dx_set_block(new, block); dx_set_count(entries, count + 1); } #if IS_ENABLED(CONFIG_UNICODE) int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, struct ext4_filename *name) { struct qstr *cf_name = &name->cf_name; unsigned char *buf; struct dx_hash_info *hinfo = &name->hinfo; int len; if (!IS_CASEFOLDED(dir) || (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) { cf_name->name = NULL; return 0; } buf = kmalloc(EXT4_NAME_LEN, GFP_NOFS); if (!buf) return -ENOMEM; len = utf8_casefold(dir->i_sb->s_encoding, iname, buf, EXT4_NAME_LEN); if (len <= 0) { kfree(buf); buf = NULL; } cf_name->name = buf; cf_name->len = (unsigned) len; if (!IS_ENCRYPTED(dir)) return 0; hinfo->hash_version = DX_HASH_SIPHASH; hinfo->seed = NULL; if (cf_name->name) return ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo); else return ext4fs_dirhash(dir, iname->name, iname->len, hinfo); } #endif /* * Test whether a directory entry matches the filename being searched for. * * Return: %true if the directory entry matches, otherwise %false. 
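 *
 * For casefolded directories the comparison goes through generic_ci_match()
 * using the casefolded name prepared in fname->cf_name; when the directory
 * is also encrypted, the hash stored in the dirent is compared first so that
 * obviously mismatching entries are rejected cheaply. Comparison errors
 * (ENOMEM, corruption) are treated as "no match".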
*/ static bool ext4_match(struct inode *parent, const struct ext4_filename *fname, struct ext4_dir_entry_2 *de) { struct fscrypt_name f; if (!de->inode) return false; f.usr_fname = fname->usr_fname; f.disk_name = fname->disk_name; #ifdef CONFIG_FS_ENCRYPTION f.crypto_buf = fname->crypto_buf; #endif #if IS_ENABLED(CONFIG_UNICODE) if (IS_CASEFOLDED(parent) && (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) { /* * Just checking IS_ENCRYPTED(parent) below is not * sufficient to decide whether one can use the hash for * skipping the string comparison, because the key might * have been added right after * ext4_fname_setup_ci_filename(). In this case, a hash * mismatch will be a false negative. Therefore, make * sure cf_name was properly initialized before * considering the calculated hash. */ if (IS_ENCRYPTED(parent) && fname->cf_name.name && (fname->hinfo.hash != EXT4_DIRENT_HASH(de) || fname->hinfo.minor_hash != EXT4_DIRENT_MINOR_HASH(de))) return false; /* * Treat comparison errors as not a match. The * only case where it happens is on a disk * corruption or ENOMEM. */ return generic_ci_match(parent, fname->usr_fname, &fname->cf_name, de->name, de->name_len) > 0; } #endif return fscrypt_match_name(&f, de->name, de->name_len); } /* * Returns 0 if not found, -EFSCORRUPTED on failure, and 1 on success */ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, struct inode *dir, struct ext4_filename *fname, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { struct ext4_dir_entry_2 * de; char * dlimit; int de_len; de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ if (de->name + de->name_len <= dlimit && ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, buf_size, offset)) return -EFSCORRUPTED; *res_dir = de; return 1; } /* prevent looping on a bad block */ de_len = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); if (de_len <= 0) return -EFSCORRUPTED; offset += de_len; de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); } return 0; } static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, struct ext4_dir_entry *de) { struct super_block *sb = dir->i_sb; if (!is_dx(dir)) return 0; if (block == 0) return 1; if (de->inode == 0 && ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) == sb->s_blocksize) return 1; return 0; } /* * __ext4_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. * * The returned buffer_head has ->b_count elevated. The caller is expected * to brelse() it when appropriate. 
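 *
 * Lookup order: inline-data directories are handled by
 * ext4_find_inline_entry(); indexed directories go through
 * ext4_dx_find_entry() and only fall back to the linear scan below if the
 * htree index turns out to be unusable (ERR_BAD_DX_DIR). The linear scan
 * reads ahead up to NAMEI_RA_SIZE blocks at a time and starts at the
 * cached i_dir_start_lookup block.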
*/ static struct buffer_head *__ext4_find_entry(struct inode *dir, struct ext4_filename *fname, struct ext4_dir_entry_2 **res_dir, int *inlined) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; ext4_lblk_t start, block; const u8 *name = fname->usr_fname->name; size_t ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ size_t ra_ptr = 0; /* Current index into readahead buffer */ ext4_lblk_t nblocks; int i, namelen, retval; *res_dir = NULL; sb = dir->i_sb; namelen = fname->usr_fname->len; if (namelen > EXT4_NAME_LEN) return NULL; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; ret = ext4_find_inline_entry(dir, fname, res_dir, &has_inline_data); if (inlined) *inlined = has_inline_data; if (has_inline_data || IS_ERR(ret)) goto cleanup_and_exit; } if ((namelen <= 2) && (name[0] == '.') && (name[1] == '.' || name[1] == '\0')) { /* * "." or ".." will only be in the first block * NFS may look up ".."; "." should be handled by the VFS */ block = start = 0; nblocks = 1; goto restart; } if (is_dx(dir)) { ret = ext4_dx_find_entry(dir, fname, res_dir); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ if (!IS_ERR(ret) || PTR_ERR(ret) != ERR_BAD_DX_DIR) goto cleanup_and_exit; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); ret = NULL; } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (!nblocks) { ret = NULL; goto cleanup_and_exit; } start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ cond_resched(); if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; if (block < start) ra_max = start - block; else ra_max = nblocks - block; ra_max = min(ra_max, ARRAY_SIZE(bh_use)); retval = ext4_bread_batch(dir, block, ra_max, false /* wait */, bh_use); if (retval) { ret = ERR_PTR(retval); ra_max = 0; goto cleanup_and_exit; } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { EXT4_ERROR_INODE_ERR(dir, EIO, "reading directory lblock %lu", (unsigned long) block); brelse(bh); ret = ERR_PTR(-EIO); goto cleanup_and_exit; } if (!buffer_verified(bh) && !is_dx_internal_node(dir, block, (struct ext4_dir_entry *)bh->b_data) && !ext4_dirblock_csum_verify(dir, bh)) { EXT4_ERROR_INODE_ERR(dir, EFSBADCRC, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); ret = ERR_PTR(-EFSBADCRC); goto cleanup_and_exit; } set_buffer_verified(bh); i = search_dirblock(bh, dir, fname, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) { ret = ERR_PTR(i); goto cleanup_and_exit; } } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. 
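 * (nblocks is re-read from i_size below, so only the blocks appended
 * since the scan started are visited on this extra pass.)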
*/ block = nblocks; nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); return ret; } static struct buffer_head *ext4_find_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *inlined) { int err; struct ext4_filename fname; struct buffer_head *bh; err = ext4_fname_setup_filename(dir, d_name, 1, &fname); if (err == -ENOENT) return NULL; if (err) return ERR_PTR(err); bh = __ext4_find_entry(dir, &fname, res_dir, inlined); ext4_fname_free_filename(&fname); return bh; } static struct buffer_head *ext4_lookup_entry(struct inode *dir, struct dentry *dentry, struct ext4_dir_entry_2 **res_dir) { int err; struct ext4_filename fname; struct buffer_head *bh; err = ext4_fname_prepare_lookup(dir, dentry, &fname); if (err == -ENOENT) return NULL; if (err) return ERR_PTR(err); bh = __ext4_find_entry(dir, &fname, res_dir, NULL); ext4_fname_free_filename(&fname); return bh; } static struct buffer_head * ext4_dx_find_entry(struct inode *dir, struct ext4_filename *fname, struct ext4_dir_entry_2 **res_dir) { struct super_block * sb = dir->i_sb; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct buffer_head *bh; ext4_lblk_t block; int retval; #ifdef CONFIG_FS_ENCRYPTION *res_dir = NULL; #endif frame = dx_probe(fname, dir, NULL, frames); if (IS_ERR(frame)) return ERR_CAST(frame); do { block = dx_get_block(frame->at); bh = ext4_read_dirblock(dir, block, DIRENT_HTREE); if (IS_ERR(bh)) goto errout; retval = search_dirblock(bh, dir, fname, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (retval == 1) goto success; brelse(bh); if (retval < 0) { bh = ERR_PTR(ERR_BAD_DX_DIR); goto errout; } /* Check to see if we should continue to search */ retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame, frames, NULL); if (retval < 0) { ext4_warning_inode(dir, "error %d reading directory index block", retval); bh = ERR_PTR(retval); goto errout; } } while (retval == 1); bh = NULL; errout: dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name)); success: dx_release(frames); return bh; } static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; struct ext4_dir_entry_2 *de; struct buffer_head *bh; if (dentry->d_name.len > EXT4_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); bh = ext4_lookup_entry(dir, dentry, &de); if (IS_ERR(bh)) return ERR_CAST(bh); inode = NULL; if (bh) { __u32 ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(dir->i_sb, ino)) { EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); return ERR_PTR(-EFSCORRUPTED); } if (unlikely(ino == dir->i_ino)) { EXT4_ERROR_INODE(dir, "'%pd' linked to parent dir", dentry); return ERR_PTR(-EFSCORRUPTED); } inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL); if (inode == ERR_PTR(-ESTALE)) { EXT4_ERROR_INODE(dir, "deleted inode referenced: %u", ino); return ERR_PTR(-EFSCORRUPTED); } if (!IS_ERR(inode) && IS_ENCRYPTED(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { ext4_warning(inode->i_sb, "Inconsistent encryption contexts: %lu/%lu", dir->i_ino, inode->i_ino); iput(inode); return ERR_PTR(-EPERM); } } if (IS_ENABLED(CONFIG_UNICODE) && !inode && IS_CASEFOLDED(dir)) { /* Eventually we want to call d_add_ci(dentry, NULL) * for negative dentries in the encoding case as * well. For now, prevent the negative dentry * from being cached. 
*/ return NULL; } return d_splice_alias(inode, dentry); } struct dentry *ext4_get_parent(struct dentry *child) { __u32 ino; struct ext4_dir_entry_2 * de; struct buffer_head *bh; bh = ext4_find_entry(d_inode(child), &dotdot_name, &de, NULL); if (IS_ERR(bh)) return ERR_CAST(bh); if (!bh) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(child->d_sb, ino)) { EXT4_ERROR_INODE(d_inode(child), "bad parent inode number: %u", ino); return ERR_PTR(-EFSCORRUPTED); } return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL)); } /* * Move count entries from end of map between two memory locations. * Returns pointer to last entry moved. */ static struct ext4_dir_entry_2 * dx_move_dirents(struct inode *dir, char *from, char *to, struct dx_map_entry *map, int count, unsigned blocksize) { unsigned rec_len = 0; while (count--) { struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + (map->offs<<2)); rec_len = ext4_dir_rec_len(de->name_len, dir); memcpy (to, de, rec_len); ((struct ext4_dir_entry_2 *) to)->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); /* wipe dir_entry excluding the rec_len field */ de->inode = 0; memset(&de->name_len, 0, ext4_rec_len_from_disk(de->rec_len, blocksize) - offsetof(struct ext4_dir_entry_2, name_len)); map++; to += rec_len; } return (struct ext4_dir_entry_2 *) (to - rec_len); } /* * Compact each dir entry in the range to the minimal rec_len. * Returns pointer to last entry in range. */ static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base, unsigned int blocksize) { struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; unsigned rec_len = 0; prev = to = de; while ((char*)de < base + blocksize) { next = ext4_next_entry(de, blocksize); if (de->inode && de->name_len) { rec_len = ext4_dir_rec_len(de->name_len, dir); if (de > to) memmove(to, de, rec_len); to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); prev = to; to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len); } de = next; } return prev; } /* * Split a full leaf block to make room for a new dir entry. * Allocate a new block, and move entries so that they are approx. equally full. * Returns pointer to de in block into which the new entry will be inserted. 
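 *
 * Roughly: dx_make_map() builds a (hash, offset, size) map of the live
 * entries, dx_sort_map() sorts it by hash, and the split point is chosen
 * so that neither resulting block is more than about half full.
 * dx_move_dirents() copies the upper half into the newly appended block,
 * dx_pack_dirents() compacts what remains, and dx_insert_block() adds the
 * new index entry. If entries on both sides of the split share the split
 * hash, the low "continuation" bit is set on the indexed hash
 * (hash2 + continued) so that lookups know to check the previous block too.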
*/ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct buffer_head **bh,struct dx_frame *frame, struct dx_hash_info *hinfo) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned continued; int count; struct buffer_head *bh2; ext4_lblk_t newblock; u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; unsigned split, move, size; struct ext4_dir_entry_2 *de = NULL, *de2; int csum_size = 0; int err = 0, i; if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); bh2 = ext4_append(handle, dir, &newblock); if (IS_ERR(bh2)) { brelse(*bh); *bh = NULL; return ERR_CAST(bh2); } BUFFER_TRACE(*bh, "get_write_access"); err = ext4_journal_get_write_access(handle, dir->i_sb, *bh, EXT4_JTR_NONE); if (err) goto journal_error; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, dir->i_sb, frame->bh, EXT4_JTR_NONE); if (err) goto journal_error; data2 = bh2->b_data; /* create map in the end of data2 block */ map = (struct dx_map_entry *) (data2 + blocksize); count = dx_make_map(dir, *bh, hinfo, map); if (count < 0) { err = count; goto journal_error; } map -= count; dx_sort_map(map, count); /* Ensure that neither split block is over half full */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { /* is more than half of this entry in 2nd half of the block? */ if (size + map[i].size/2 > blocksize/2) break; size += map[i].size; move++; } /* * map index at which we will split * * If the sum of active entries didn't exceed half the block size, just * split it in half by count; each resulting block will have at least * half the space free. */ if (i > 0) split = count - move; else split = count/2; if (WARN_ON_ONCE(split == 0)) { /* Should never happen, but avoid out-of-bounds access below */ ext4_error_inode_block(dir, (*bh)->b_blocknr, 0, "bad indexed directory? hash=%08x:%08x count=%d move=%u", hinfo->hash, hinfo->minor_hash, count, move); err = -EFSCORRUPTED; goto out; } hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", (unsigned long)dx_get_block(frame->at), hash2, split, count-split)); /* Fancy dance to stay within two buffers */ de2 = dx_move_dirents(dir, data1, data2, map + split, count - split, blocksize); de = dx_pack_dirents(dir, data1, blocksize); de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de2, blocksize); if (csum_size) { ext4_initialize_dirent_tail(*bh, blocksize); ext4_initialize_dirent_tail(bh2, blocksize); } dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); /* Which block gets the new entry? 
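 * Entries hashing >= hash2 were moved to the new block, so if the name
 * being inserted hashes into that range we swap *bh with bh2 and return
 * a slot in the new block instead.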
*/ if (hinfo->hash >= hash2) { swap(*bh, bh2); de = de2; } dx_insert_block(frame, hash2 + continued, newblock); err = ext4_handle_dirty_dirblock(handle, dir, bh2); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; brelse(bh2); dxtrace(dx_show_index("frame", frame->entries)); return de; journal_error: ext4_std_error(dir->i_sb, err); out: brelse(*bh); brelse(bh2); *bh = NULL; return ERR_PTR(err); } int ext4_find_dest_de(struct inode *dir, struct inode *inode, struct buffer_head *bh, void *buf, int buf_size, struct ext4_filename *fname, struct ext4_dir_entry_2 **dest_de) { struct ext4_dir_entry_2 *de; unsigned short reclen = ext4_dir_rec_len(fname_len(fname), dir); int nlen, rlen; unsigned int offset = 0; char *top; de = buf; top = buf + buf_size - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, buf, buf_size, offset)) return -EFSCORRUPTED; if (ext4_match(dir, fname, de)) return -EEXIST; nlen = ext4_dir_rec_len(de->name_len, dir); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if ((de->inode ? rlen - nlen : rlen) >= reclen) break; de = (struct ext4_dir_entry_2 *)((char *)de + rlen); offset += rlen; } if ((char *) de > top) return -ENOSPC; *dest_de = de; return 0; } void ext4_insert_dentry(struct inode *dir, struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, struct ext4_filename *fname) { int nlen, rlen; nlen = ext4_dir_rec_len(de->name_len, dir); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if (de->inode) { struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen); de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size); de->rec_len = ext4_rec_len_to_disk(nlen, buf_size); de = de1; } de->file_type = EXT4_FT_UNKNOWN; de->inode = cpu_to_le32(inode->i_ino); ext4_set_de_type(inode->i_sb, de, inode->i_mode); de->name_len = fname_len(fname); memcpy(de->name, fname_name(fname), fname_len(fname)); if (ext4_hash_in_dirent(dir)) { struct dx_hash_info *hinfo = &fname->hinfo; EXT4_DIRENT_HASHES(de)->hash = cpu_to_le32(hinfo->hash); EXT4_DIRENT_HASHES(de)->minor_hash = cpu_to_le32(hinfo->minor_hash); } } /* * Add a new entry into a directory (leaf) block. If de is non-NULL, * it points to a directory entry which is guaranteed to be large * enough for new directory entry. If de is NULL, then * add_dirent_to_buf will attempt search the directory block for * space. It will return -ENOSPC if no space is available, and -EIO * and -EEXIST if directory entry already exists. */ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; int err, err2; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); if (!de) { err = ext4_find_dest_de(dir, inode, bh, bh->b_data, blocksize - csum_size, fname, &de); if (err) return err; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, dir->i_sb, bh, EXT4_JTR_NONE); if (err) { ext4_std_error(dir->i_sb, err); return err; } /* By now the buffer is marked for journaling */ ext4_insert_dentry(dir, inode, de, blocksize, fname); /* * XXX shouldn't update any times until successful * completion of syscall, but too many callers depend * on this. 
* * XXX similarly, too many callers depend on * ext4_new_inode() setting the times, but error * recovery deletes the inode, so the worst that can * happen is that the times are slightly out of date * and/or different from the directory change time. */ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); ext4_update_dx_flag(dir); inode_inc_iversion(dir); err2 = ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirblock(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return err ? err : err2; } static bool ext4_check_dx_root(struct inode *dir, struct dx_root *root) { struct fake_dirent *fde; const char *error_msg; unsigned int rlen; unsigned int blocksize = dir->i_sb->s_blocksize; char *blockend = (char *)root + dir->i_sb->s_blocksize; fde = &root->dot; if (unlikely(fde->name_len != 1)) { error_msg = "invalid name_len for '.'"; goto corrupted; } if (unlikely(strncmp(root->dot_name, ".", fde->name_len))) { error_msg = "invalid name for '.'"; goto corrupted; } rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize); if (unlikely((char *)fde + rlen >= blockend)) { error_msg = "invalid rec_len for '.'"; goto corrupted; } fde = &root->dotdot; if (unlikely(fde->name_len != 2)) { error_msg = "invalid name_len for '..'"; goto corrupted; } if (unlikely(strncmp(root->dotdot_name, "..", fde->name_len))) { error_msg = "invalid name for '..'"; goto corrupted; } rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize); if (unlikely((char *)fde + rlen >= blockend)) { error_msg = "invalid rec_len for '..'"; goto corrupted; } return true; corrupted: EXT4_ERROR_INODE(dir, "Corrupt dir, %s, running e2fsck is recommended", error_msg); return false; } /* * This converts a one block unindexed directory to a 3 block indexed * directory, and adds the dentry to the indexed directory. 
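 *
 * Block 0 is rewritten as the dx_root: only "." and ".." remain, followed
 * by the root info and a single index entry pointing at block 1. The old
 * dirents are copied into a freshly appended block 1, and the do_split()
 * call below produces block 2 while making room for the new entry. The
 * hash version comes from the superblock default, or DX_HASH_SIPHASH when
 * hashes are stored in the dirents themselves.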
*/ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode, struct buffer_head *bh) { struct buffer_head *bh2; struct dx_root *root; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct dx_entry *entries; struct ext4_dir_entry_2 *de, *de2; char *data2, *top; unsigned len; int retval; unsigned blocksize; ext4_lblk_t block; struct fake_dirent *fde; int csum_size = 0; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); blocksize = dir->i_sb->s_blocksize; dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); BUFFER_TRACE(bh, "get_write_access"); retval = ext4_journal_get_write_access(handle, dir->i_sb, bh, EXT4_JTR_NONE); if (retval) { ext4_std_error(dir->i_sb, retval); brelse(bh); return retval; } root = (struct dx_root *) bh->b_data; if (!ext4_check_dx_root(dir, root)) { brelse(bh); return -EFSCORRUPTED; } /* The 0th block becomes the root, move the dirents out */ fde = &root->dotdot; de = (struct ext4_dir_entry_2 *)((char *)fde + ext4_rec_len_from_disk(fde->rec_len, blocksize)); len = ((char *) root) + (blocksize - csum_size) - (char *) de; /* Allocate new block for the 0th block's dirents */ bh2 = ext4_append(handle, dir, &block); if (IS_ERR(bh2)) { brelse(bh); return PTR_ERR(bh2); } ext4_set_inode_flag(dir, EXT4_INODE_INDEX); data2 = bh2->b_data; memcpy(data2, de, len); memset(de, 0, len); /* wipe old data */ de = (struct ext4_dir_entry_2 *) data2; top = data2 + len; while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) { if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len, (char *)de - data2)) { brelse(bh2); brelse(bh); return -EFSCORRUPTED; } de = de2; } de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de, blocksize); if (csum_size) ext4_initialize_dirent_tail(bh2, blocksize); /* Initialize the root; the dot dirents already exist */ de = (struct ext4_dir_entry_2 *) (&root->dotdot); de->rec_len = ext4_rec_len_to_disk( blocksize - ext4_dir_rec_len(2, NULL), blocksize); memset (&root->info, 0, sizeof(root->info)); root->info.info_length = sizeof(root->info); if (ext4_hash_in_dirent(dir)) root->info.hash_version = DX_HASH_SIPHASH; else root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; entries = root->entries; dx_set_block(entries, 1); dx_set_count(entries, 1); dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info))); /* Initialize as for dx_probe */ fname->hinfo.hash_version = root->info.hash_version; if (fname->hinfo.hash_version <= DX_HASH_TEA) fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; /* casefolded encrypted hashes are computed on fname setup */ if (!ext4_hash_in_dirent(dir)) { int err = ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), &fname->hinfo); if (err < 0) { brelse(bh2); brelse(bh); return err; } } memset(frames, 0, sizeof(frames)); frame = frames; frame->entries = entries; frame->at = entries; frame->bh = bh; retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (retval) goto out_frames; retval = ext4_handle_dirty_dirblock(handle, dir, bh2); if (retval) goto out_frames; de = do_split(handle,dir, &bh2, frame, &fname->hinfo); if (IS_ERR(de)) { retval = PTR_ERR(de); goto out_frames; } retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2); out_frames: /* * Even if the block split failed, we have to properly write * out all the changes we did so far. Otherwise we can end up * with corrupted filesystem. 
*/ if (retval) ext4_mark_inode_dirty(handle, dir); dx_release(frames); brelse(bh2); return retval; } /* * ext4_add_entry() * * adds a file entry to the specified directory, using the same * semantics as ext4_find_entry(). It returns NULL if it failed. * * NOTE!! The inode part of 'de' is left at 0 - which means you * may not sleep between calling this and putting something into * the entry, as someone else might have used it while you slept. */ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); struct buffer_head *bh = NULL; struct ext4_dir_entry_2 *de; struct super_block *sb; struct ext4_filename fname; int retval; int dx_fallback=0; unsigned blocksize; ext4_lblk_t block, blocks; int csum_size = 0; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; blocksize = sb->s_blocksize; if (fscrypt_is_nokey_name(dentry)) return -ENOKEY; if (!generic_ci_validate_strict_name(dir, &dentry->d_name)) return -EINVAL; retval = ext4_fname_setup_filename(dir, &dentry->d_name, 0, &fname); if (retval) return retval; if (ext4_has_inline_data(dir)) { retval = ext4_try_add_inline_entry(handle, &fname, dir, inode); if (retval < 0) goto out; if (retval == 1) { retval = 0; goto out; } } if (is_dx(dir)) { retval = ext4_dx_add_entry(handle, &fname, dir, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) goto out; /* Can we just ignore htree data? */ if (ext4_has_metadata_csum(sb)) { EXT4_ERROR_INODE(dir, "Directory has corrupted htree index."); retval = -EFSCORRUPTED; goto out; } ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; retval = ext4_mark_inode_dirty(handle, dir); if (unlikely(retval)) goto out; } blocks = dir->i_size >> sb->s_blocksize_bits; for (block = 0; block < blocks; block++) { bh = ext4_read_dirblock(dir, block, DIRENT); if (bh == NULL) { bh = ext4_bread(handle, dir, block, EXT4_GET_BLOCKS_CREATE); goto add_to_new_block; } if (IS_ERR(bh)) { retval = PTR_ERR(bh); bh = NULL; goto out; } retval = add_dirent_to_buf(handle, &fname, dir, inode, NULL, bh); if (retval != -ENOSPC) goto out; if (blocks == 1 && !dx_fallback && ext4_has_feature_dir_index(sb)) { retval = make_indexed_dir(handle, &fname, dir, inode, bh); bh = NULL; /* make_indexed_dir releases bh */ goto out; } brelse(bh); } bh = ext4_append(handle, dir, &block); add_to_new_block: if (IS_ERR(bh)) { retval = PTR_ERR(bh); bh = NULL; goto out; } de = (struct ext4_dir_entry_2 *) bh->b_data; de->inode = 0; de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize); if (csum_size) ext4_initialize_dirent_tail(bh, blocksize); retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh); out: ext4_fname_free_filename(&fname); brelse(bh); if (retval == 0) ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); return retval; } /* * Returns 0 for success, or a negative error value */ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode) { struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; struct dx_entry *entries, *at; struct buffer_head *bh; struct super_block *sb = dir->i_sb; struct ext4_dir_entry_2 *de; int restart; int err; again: restart = 0; frame = dx_probe(fname, dir, NULL, frames); if (IS_ERR(frame)) return PTR_ERR(frame); entries = frame->entries; at = frame->at; bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE); if (IS_ERR(bh)) { err = PTR_ERR(bh); bh = NULL; goto cleanup; } BUFFER_TRACE(bh, "get_write_access"); err 
= ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); if (err) goto journal_error; err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh); if (err != -ENOSPC) goto cleanup; err = 0; /* Block full, should compress but for now just split */ dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", dx_get_count(entries), dx_get_limit(entries))); /* Need to split index? */ if (dx_get_count(entries) == dx_get_limit(entries)) { ext4_lblk_t newblock; int levels = frame - frames + 1; unsigned int icount; int add_level = 1; struct dx_entry *entries2; struct dx_node *node2; struct buffer_head *bh2; while (frame > frames) { if (dx_get_count((frame - 1)->entries) < dx_get_limit((frame - 1)->entries)) { add_level = 0; break; } frame--; /* split higher index block */ at = frame->at; entries = frame->entries; restart = 1; } if (add_level && levels == ext4_dir_htree_level(sb)) { ext4_warning(sb, "Directory (ino: %lu) index full, " "reach max htree level :%d", dir->i_ino, levels); if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) { ext4_warning(sb, "Large directory feature is " "not enabled on this " "filesystem"); } err = -ENOSPC; goto cleanup; } icount = dx_get_count(entries); bh2 = ext4_append(handle, dir, &newblock); if (IS_ERR(bh2)) { err = PTR_ERR(bh2); goto cleanup; } node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize, sb->s_blocksize); BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, sb, frame->bh, EXT4_JTR_NONE); if (err) goto journal_error; if (!add_level) { unsigned icount1 = icount/2, icount2 = icount - icount1; unsigned hash2 = dx_get_hash(entries + icount1); dxtrace(printk(KERN_DEBUG "Split index %i/%i\n", icount1, icount2)); BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */ err = ext4_journal_get_write_access(handle, sb, (frame - 1)->bh, EXT4_JTR_NONE); if (err) goto journal_error; memcpy((char *) entries2, (char *) (entries + icount1), icount2 * sizeof(struct dx_entry)); dx_set_count(entries, icount1); dx_set_count(entries2, icount2); dx_set_limit(entries2, dx_node_limit(dir)); /* Which index block gets the new entry? 
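 * If the slot we were about to use falls into the second half
 * (at - entries >= icount1), move frame->at into the new node, switch
 * frame->entries over to entries2 and swap the buffer heads, so the
 * journal/dirty handling below operates on the block that actually
 * receives the entry.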
*/ if (at - entries >= icount1) { frame->at = at - entries - icount1 + entries2; frame->entries = entries = entries2; swap(frame->bh, bh2); } dx_insert_block((frame - 1), hash2, newblock); dxtrace(dx_show_index("node", frame->entries)); dxtrace(dx_show_index("node", ((struct dx_node *) bh2->b_data)->entries)); err = ext4_handle_dirty_dx_node(handle, dir, bh2); if (err) goto journal_error; brelse (bh2); err = ext4_handle_dirty_dx_node(handle, dir, (frame - 1)->bh); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (restart || err) goto journal_error; } else { struct dx_root *dxroot; memcpy((char *) entries2, (char *) entries, icount * sizeof(struct dx_entry)); dx_set_limit(entries2, dx_node_limit(dir)); /* Set up root */ dx_set_count(entries, 1); dx_set_block(entries + 0, newblock); dxroot = (struct dx_root *)frames[0].bh->b_data; dxroot->info.indirect_levels += 1; dxtrace(printk(KERN_DEBUG "Creating %d level index...\n", dxroot->info.indirect_levels)); err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, bh2); brelse(bh2); restart = 1; goto journal_error; } } de = do_split(handle, dir, &bh, frame, &fname->hinfo); if (IS_ERR(de)) { err = PTR_ERR(de); goto cleanup; } err = add_dirent_to_buf(handle, fname, dir, inode, de, bh); goto cleanup; journal_error: ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */ cleanup: brelse(bh); dx_release(frames); /* @restart is true means htree-path has been changed, we need to * repeat dx_probe() to find out valid htree-path */ if (restart && err == 0) goto again; return err; } /* * ext4_generic_delete_entry deletes a directory entry by merging it * with the previous entry */ int ext4_generic_delete_entry(struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, int buf_size, int csum_size) { struct ext4_dir_entry_2 *de, *pde; unsigned int blocksize = dir->i_sb->s_blocksize; int i; i = 0; pde = NULL; de = entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, entry_buf, buf_size, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) { pde->rec_len = ext4_rec_len_to_disk( ext4_rec_len_from_disk(pde->rec_len, blocksize) + ext4_rec_len_from_disk(de->rec_len, blocksize), blocksize); /* wipe entire dir_entry */ memset(de, 0, ext4_rec_len_from_disk(de->rec_len, blocksize)); } else { /* wipe dir_entry excluding the rec_len field */ de->inode = 0; memset(&de->name_len, 0, ext4_rec_len_from_disk(de->rec_len, blocksize) - offsetof(struct ext4_dir_entry_2, name_len)); } inode_inc_iversion(dir); return 0; } i += ext4_rec_len_from_disk(de->rec_len, blocksize); pde = de; de = ext4_next_entry(de, blocksize); } return -ENOENT; } static int ext4_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh) { int err, csum_size = 0; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; err = ext4_delete_inline_entry(handle, dir, de_del, bh, &has_inline_data); if (has_inline_data) return err; } if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, dir->i_sb, bh, EXT4_JTR_NONE); if (unlikely(err)) goto out; err = ext4_generic_delete_entry(dir, de_del, bh, bh->b_data, dir->i_sb->s_blocksize, csum_size); if (err) goto out; BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirblock(handle, dir, 
bh); if (unlikely(err)) goto out; return 0; out: if (err != -ENOENT) ext4_std_error(dir->i_sb, err); return err; } /* * Set directory link count to 1 if nlinks > EXT4_LINK_MAX, or if nlinks == 2 * since this indicates that nlinks count was previously 1 to avoid overflowing * the 16-bit i_links_count field on disk. Directories with i_nlink == 1 mean * that subdirectory link counts are not being maintained accurately. * * The caller has already checked for i_nlink overflow in case the DIR_LINK * feature is not enabled and returned -EMLINK. The is_dx() check is a proxy * for checking S_ISDIR(inode) (since the INODE_INDEX feature will not be set * on regular files) and to avoid creating huge/slow non-HTREE directories. */ static void ext4_inc_count(struct inode *inode) { inc_nlink(inode); if (is_dx(inode) && (inode->i_nlink > EXT4_LINK_MAX || inode->i_nlink == 2)) set_nlink(inode, 1); } /* * If a directory had nlink == 1, then we should let it be 1. This indicates that the * directory has >EXT4_LINK_MAX subdirs. */ static void ext4_dec_count(struct inode *inode) { if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) drop_nlink(inode); } /* * Add non-directory inode to a directory. On success, the inode reference is * consumed by dentry instantiation. This is also indicated by clearing of * *inodep pointer. On failure, the caller is responsible for dropping the * inode reference in a safe context. */ static int ext4_add_nondir(handle_t *handle, struct dentry *dentry, struct inode **inodep) { struct inode *dir = d_inode(dentry->d_parent); struct inode *inode = *inodep; int err = ext4_add_entry(handle, dentry, inode); if (!err) { err = ext4_mark_inode_dirty(handle, inode); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); d_instantiate_new(dentry, inode); *inodep = NULL; return err; } drop_nlink(inode); ext4_mark_inode_dirty(handle, inode); ext4_orphan_add(handle, inode); unlock_new_inode(inode); return err; } /* * By the time this is called, we have already created * the directory cache entry for the new file, but it * is so far negative - it has no inode. * * If the create succeeds, we fill in the inode information * with d_instantiate().
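 *
 * The inode is allocated and the handle started in one step by
 * ext4_new_inode_start_handle(); on -ENOSPC the whole operation is
 * retried after ext4_should_retry_alloc(), the same pattern used by
 * mknod, tmpfile, mkdir and symlink below.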
*/ static int ext4_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { handle_t *handle; struct inode *inode; int err, credits, retries = 0; err = dquot_initialize(dir); if (err) return err; credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3); retry: inode = ext4_new_inode_start_handle(idmap, dir, mode, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); err = ext4_add_nondir(handle, dentry, &inode); if (!err) ext4_fc_track_create(handle, dentry); } if (handle) ext4_journal_stop(handle); if (!IS_ERR_OR_NULL(inode)) iput(inode); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { handle_t *handle; struct inode *inode; int err, credits, retries = 0; err = dquot_initialize(dir); if (err) return err; credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3); retry: inode = ext4_new_inode_start_handle(idmap, dir, mode, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &ext4_special_inode_operations; err = ext4_add_nondir(handle, dentry, &inode); if (!err) ext4_fc_track_create(handle, dentry); } if (handle) ext4_journal_stop(handle); if (!IS_ERR_OR_NULL(inode)) iput(inode); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { handle_t *handle; struct inode *inode; int err, retries = 0; err = dquot_initialize(dir); if (err) return err; retry: inode = ext4_new_inode_start_handle(idmap, dir, mode, NULL, 0, NULL, EXT4_HT_DIR, EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) + 4 + EXT4_XATTR_TRANS_BLOCKS); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); d_tmpfile(file, inode); err = ext4_orphan_add(handle, inode); if (err) goto err_unlock_inode; mark_inode_dirty(inode); unlock_new_inode(inode); } if (handle) ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return finish_open_simple(file, err); err_unlock_inode: ext4_journal_stop(handle); unlock_new_inode(inode); return err; } struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, unsigned int parent_ino, int dotdot_real_len) { de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; de->rec_len = ext4_rec_len_to_disk(ext4_dir_rec_len(de->name_len, NULL), blocksize); strcpy(de->name, "."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); de = ext4_next_entry(de, blocksize); de->inode = cpu_to_le32(parent_ino); de->name_len = 2; if (!dotdot_real_len) de->rec_len = ext4_rec_len_to_disk(blocksize - (csum_size + ext4_dir_rec_len(1, NULL)), blocksize); else de->rec_len = ext4_rec_len_to_disk( ext4_dir_rec_len(de->name_len, NULL), blocksize); strcpy(de->name, ".."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); return ext4_next_entry(de, blocksize); } int ext4_init_new_dir(handle_t *handle, 
struct inode *dir, struct inode *inode) { struct buffer_head *dir_block = NULL; struct ext4_dir_entry_2 *de; ext4_lblk_t block = 0; unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; int err; if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { err = ext4_try_create_inline_dir(handle, dir, inode); if (err < 0 && err != -ENOSPC) goto out; if (!err) goto out; } inode->i_size = 0; dir_block = ext4_append(handle, inode, &block); if (IS_ERR(dir_block)) return PTR_ERR(dir_block); de = (struct ext4_dir_entry_2 *)dir_block->b_data; ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); set_nlink(inode, 2); if (csum_size) ext4_initialize_dirent_tail(dir_block, blocksize); BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirblock(handle, inode, dir_block); if (err) goto out; set_buffer_verified(dir_block); out: brelse(dir_block); return err; } static int ext4_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { handle_t *handle; struct inode *inode; int err, err2 = 0, credits, retries = 0; if (EXT4_DIR_LINK_MAX(dir)) return -EMLINK; err = dquot_initialize(dir); if (err) return err; credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3); retry: inode = ext4_new_inode_start_handle(idmap, dir, S_IFDIR | mode, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_stop; inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; err = ext4_init_new_dir(handle, dir, inode); if (err) goto out_clear_inode; err = ext4_mark_inode_dirty(handle, inode); if (!err) err = ext4_add_entry(handle, dentry, inode); if (err) { out_clear_inode: clear_nlink(inode); ext4_orphan_add(handle, inode); unlock_new_inode(inode); err2 = ext4_mark_inode_dirty(handle, inode); if (unlikely(err2)) err = err2; ext4_journal_stop(handle); iput(inode); goto out_retry; } ext4_inc_count(dir); ext4_update_dx_flag(dir); err = ext4_mark_inode_dirty(handle, dir); if (err) goto out_clear_inode; d_instantiate_new(dentry, inode); ext4_fc_track_create(handle, dentry); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); out_stop: if (handle) ext4_journal_stop(handle); out_retry: if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * routine to check that the specified directory is empty (for rmdir) */ bool ext4_empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; struct ext4_dir_entry_2 *de; struct super_block *sb; if (ext4_has_inline_data(inode)) { int has_inline_data = 1; int ret; ret = empty_inline_dir(inode, &has_inline_data); if (has_inline_data) return ret; } sb = inode->i_sb; if (inode->i_size < ext4_dir_rec_len(1, NULL) + ext4_dir_rec_len(2, NULL)) { EXT4_ERROR_INODE(inode, "invalid size"); return false; } bh = ext4_read_dirblock(inode, 0, EITHER); if (IS_ERR(bh)) return false; de = (struct ext4_dir_entry_2 *) bh->b_data; if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, 0) || le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { ext4_warning_inode(inode, "directory missing '.'"); brelse(bh); return false; } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset) || le32_to_cpu(de->inode) == 0 
|| strcmp("..", de->name)) { ext4_warning_inode(inode, "directory missing '..'"); brelse(bh); return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); while (offset < inode->i_size) { if (!(offset & (sb->s_blocksize - 1))) { unsigned int lblock; brelse(bh); lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); bh = ext4_read_dirblock(inode, lblock, EITHER); if (bh == NULL) { offset += sb->s_blocksize; continue; } if (IS_ERR(bh)) return false; } de = (struct ext4_dir_entry_2 *) (bh->b_data + (offset & (sb->s_blocksize - 1))); if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset) || le32_to_cpu(de->inode)) { brelse(bh); return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); } brelse(bh); return true; } static int ext4_rmdir(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle = NULL; if (unlikely(ext4_forced_shutdown(dir->i_sb))) return -EIO; /* Initialize quotas before so that eventual writes go in * separate transaction */ retval = dquot_initialize(dir); if (retval) return retval; retval = dquot_initialize(d_inode(dentry)); if (retval) return retval; retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) goto end_rmdir; inode = d_inode(dentry); retval = -EFSCORRUPTED; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; if (!ext4_empty_dir(inode)) goto end_rmdir; handle = ext4_journal_start(dir, EXT4_HT_DIR, EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); handle = NULL; goto end_rmdir; } if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_rmdir; if (!EXT4_DIR_LINK_EMPTY(inode)) ext4_warning_inode(inode, "empty directory '%.*s' has too many links (%u)", dentry->d_name.len, dentry->d_name.name, inode->i_nlink); inode_inc_iversion(inode); clear_nlink(inode); /* There's no need to set i_disksize: the fact that i_nlink is * zero will ensure that the right thing happens during any * recovery. */ inode->i_size = 0; ext4_orphan_add(handle, inode); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_ctime_current(inode); retval = ext4_mark_inode_dirty(handle, inode); if (retval) goto end_rmdir; ext4_dec_count(dir); ext4_update_dx_flag(dir); ext4_fc_track_unlink(handle, dentry); retval = ext4_mark_inode_dirty(handle, dir); /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid * invalidating the dentries here, alongside with returning the * negative dentries at ext4_lookup(), when it is better * supported by the VFS for the CI case. */ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir)) d_invalidate(dentry); end_rmdir: brelse(bh); if (handle) ext4_journal_stop(handle); return retval; } int __ext4_unlink(struct inode *dir, const struct qstr *d_name, struct inode *inode, struct dentry *dentry /* NULL during fast_commit recovery */) { int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle; int skip_remove_dentry = 0; /* * Keep this outside the transaction; it may have to set up the * directory's encryption key, which isn't GFP_NOFS-safe. 
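 *
 * During fast-commit replay the name may legitimately resolve to a
 * different inode (it was renamed before the crash); in that case only
 * the victim inode's link count and ctime are updated and the on-disk
 * dentry is left alone (skip_remove_dentry).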
*/ bh = ext4_find_entry(dir, d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) return -ENOENT; if (le32_to_cpu(de->inode) != inode->i_ino) { /* * It's okay if we don't find a dentry which matches * the inode. That's because it might have gotten * renamed to a different inode number. */ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) skip_remove_dentry = 1; else goto out_bh; } handle = ext4_journal_start(dir, EXT4_HT_DIR, EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); goto out_bh; } if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); if (!skip_remove_dentry) { retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto out_handle; inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); ext4_update_dx_flag(dir); retval = ext4_mark_inode_dirty(handle, dir); if (retval) goto out_handle; } else { retval = 0; } if (inode->i_nlink == 0) ext4_warning_inode(inode, "Deleting file '%.*s' with no links", d_name->len, d_name->name); else drop_nlink(inode); if (!inode->i_nlink) ext4_orphan_add(handle, inode); inode_set_ctime_current(inode); retval = ext4_mark_inode_dirty(handle, inode); if (dentry && !retval) ext4_fc_track_unlink(handle, dentry); out_handle: ext4_journal_stop(handle); out_bh: brelse(bh); return retval; } static int ext4_unlink(struct inode *dir, struct dentry *dentry) { int retval; if (unlikely(ext4_forced_shutdown(dir->i_sb))) return -EIO; trace_ext4_unlink_enter(dir, dentry); /* * Initialize quotas before so that eventual writes go * in a separate transaction */ retval = dquot_initialize(dir); if (retval) goto out_trace; retval = dquot_initialize(d_inode(dentry)); if (retval) goto out_trace; retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry); /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want to avoid * invalidating the dentries here, alongside with returning the * negative dentries at ext4_lookup(), when it is better * supported by the VFS for the CI case. */ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir)) d_invalidate(dentry); out_trace: trace_ext4_unlink_exit(dentry, retval); return retval; } static int ext4_init_symlink_block(handle_t *handle, struct inode *inode, struct fscrypt_str *disk_link) { struct buffer_head *bh; char *kaddr; int err = 0; bh = ext4_bread(handle, inode, 0, EXT4_GET_BLOCKS_CREATE); if (IS_ERR(bh)) return PTR_ERR(bh); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode->i_sb, bh, EXT4_JTR_NONE); if (err) goto out; kaddr = (char *)bh->b_data; memcpy(kaddr, disk_link->name, disk_link->len); inode->i_size = disk_link->len - 1; EXT4_I(inode)->i_disksize = inode->i_size; err = ext4_handle_dirty_metadata(handle, inode, bh); out: brelse(bh); return err; } static int ext4_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { handle_t *handle; struct inode *inode; int err, len = strlen(symname); int credits; struct fscrypt_str disk_link; int retries = 0; if (unlikely(ext4_forced_shutdown(dir->i_sb))) return -EIO; err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize, &disk_link); if (err) return err; err = dquot_initialize(dir); if (err) return err; /* * EXT4_INDEX_EXTRA_TRANS_BLOCKS for addition of entry into the * directory. +3 for inode, inode bitmap, group descriptor allocation. * EXT4_DATA_TRANS_BLOCKS for the data block allocation and * modification.
*/ credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3; retry: inode = ext4_new_inode_start_handle(idmap, dir, S_IFLNK|S_IRWXUGO, &dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); if (IS_ERR(inode)) { if (handle) ext4_journal_stop(handle); err = PTR_ERR(inode); goto out_retry; } if (IS_ENCRYPTED(inode)) { err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link); if (err) goto err_drop_inode; inode->i_op = &ext4_encrypted_symlink_inode_operations; } else { if ((disk_link.len > EXT4_N_BLOCKS * 4)) { inode->i_op = &ext4_symlink_inode_operations; } else { inode->i_op = &ext4_fast_symlink_inode_operations; } } if ((disk_link.len > EXT4_N_BLOCKS * 4)) { /* alloc symlink block and fill it */ err = ext4_init_symlink_block(handle, inode, &disk_link); if (err) goto err_drop_inode; } else { /* clear the extent format for fast symlink */ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name, disk_link.len); inode->i_size = disk_link.len - 1; EXT4_I(inode)->i_disksize = inode->i_size; if (!IS_ENCRYPTED(inode)) inode_set_cached_link(inode, (char *)&EXT4_I(inode)->i_data, inode->i_size); } err = ext4_add_nondir(handle, dentry, &inode); if (handle) ext4_journal_stop(handle); iput(inode); goto out_retry; err_drop_inode: clear_nlink(inode); ext4_mark_inode_dirty(handle, inode); ext4_orphan_add(handle, inode); unlock_new_inode(inode); if (handle) ext4_journal_stop(handle); iput(inode); out_retry: if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; if (disk_link.name != (unsigned char *)symname) kfree(disk_link.name); return err; } int __ext4_link(struct inode *dir, struct inode *inode, struct dentry *dentry) { handle_t *handle; int err, retries = 0; retry: handle = ext4_journal_start(dir, EXT4_HT_DIR, (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode_set_ctime_current(inode); ext4_inc_count(inode); ihold(inode); err = ext4_add_entry(handle, dentry, inode); if (!err) { err = ext4_mark_inode_dirty(handle, inode); /* this can happen only for tmpfile being * linked the first time */ if (inode->i_nlink == 1) ext4_orphan_del(handle, inode); d_instantiate(dentry, inode); ext4_fc_track_link(handle, dentry); } else { drop_nlink(inode); iput(inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); int err; if (inode->i_nlink >= EXT4_LINK_MAX) return -EMLINK; err = fscrypt_prepare_link(old_dentry, dir, dentry); if (err) return err; if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; err = dquot_initialize(dir); if (err) return err; return __ext4_link(dir, inode, dentry); } /* * Try to find buffer head where contains the parent block. * It should be the inode block if it is inlined or the 1st block * if it is a normal dir. 
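 *
 * On success the ".." entry itself is returned through parent_de (and
 * *inlined is set for inline directories) so that rename can later
 * repoint it at the new parent directory.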
*/ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct inode *inode, int *retval, struct ext4_dir_entry_2 **parent_de, int *inlined) { struct buffer_head *bh; if (!ext4_has_inline_data(inode)) { struct ext4_dir_entry_2 *de; unsigned int offset; bh = ext4_read_dirblock(inode, 0, EITHER); if (IS_ERR(bh)) { *retval = PTR_ERR(bh); return NULL; } de = (struct ext4_dir_entry_2 *) bh->b_data; if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, 0) || le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { EXT4_ERROR_INODE(inode, "directory missing '.'"); brelse(bh); *retval = -EFSCORRUPTED; return NULL; } offset = ext4_rec_len_from_disk(de->rec_len, inode->i_sb->s_blocksize); de = ext4_next_entry(de, inode->i_sb->s_blocksize); if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset) || le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { EXT4_ERROR_INODE(inode, "directory missing '..'"); brelse(bh); *retval = -EFSCORRUPTED; return NULL; } *parent_de = de; return bh; } *inlined = 1; return ext4_get_first_inline_block(inode, parent_de, retval); } struct ext4_renament { struct inode *dir; struct dentry *dentry; struct inode *inode; bool is_dir; int dir_nlink_delta; /* entry for "dentry" */ struct buffer_head *bh; struct ext4_dir_entry_2 *de; int inlined; /* entry for ".." in inode if it's a directory */ struct buffer_head *dir_bh; struct ext4_dir_entry_2 *parent_de; int dir_inlined; }; static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent, bool is_cross) { int retval; ent->is_dir = true; if (!is_cross) return 0; ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode, &retval, &ent->parent_de, &ent->dir_inlined); if (!ent->dir_bh) return retval; if (le32_to_cpu(ent->parent_de->inode) != ent->dir->i_ino) return -EFSCORRUPTED; BUFFER_TRACE(ent->dir_bh, "get_write_access"); return ext4_journal_get_write_access(handle, ent->dir->i_sb, ent->dir_bh, EXT4_JTR_NONE); } static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent, unsigned dir_ino) { int retval; if (!ent->dir_bh) return 0; ent->parent_de->inode = cpu_to_le32(dir_ino); BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata"); if (!ent->dir_inlined) { if (is_dx(ent->inode)) { retval = ext4_handle_dirty_dx_node(handle, ent->inode, ent->dir_bh); } else { retval = ext4_handle_dirty_dirblock(handle, ent->inode, ent->dir_bh); } } else { retval = ext4_mark_inode_dirty(handle, ent->inode); } if (retval) { ext4_std_error(ent->dir->i_sb, retval); return retval; } return 0; } static int ext4_setent(handle_t *handle, struct ext4_renament *ent, unsigned ino, unsigned file_type) { int retval, retval2; BUFFER_TRACE(ent->bh, "get write access"); retval = ext4_journal_get_write_access(handle, ent->dir->i_sb, ent->bh, EXT4_JTR_NONE); if (retval) return retval; ent->de->inode = cpu_to_le32(ino); if (ext4_has_feature_filetype(ent->dir->i_sb)) ent->de->file_type = file_type; inode_inc_iversion(ent->dir); inode_set_mtime_to_ts(ent->dir, inode_set_ctime_current(ent->dir)); retval = ext4_mark_inode_dirty(handle, ent->dir); BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata"); if (!ent->inlined) { retval2 = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh); if (unlikely(retval2)) { ext4_std_error(ent->dir->i_sb, retval2); return retval2; } } return retval; } static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, unsigned ino, unsigned file_type) { struct ext4_renament old = *ent; int retval = 0; /* * old->de could have 
moved from under us during make indexed dir, * so the old->de may no longer valid and need to find it again * before reset old inode info. */ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, &old.inlined); if (IS_ERR(old.bh)) retval = PTR_ERR(old.bh); if (!old.bh) retval = -ENOENT; if (retval) { ext4_std_error(old.dir->i_sb, retval); return; } ext4_setent(handle, &old, ino, file_type); brelse(old.bh); } static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, const struct qstr *d_name) { int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; bh = ext4_find_entry(dir, d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); if (bh) { retval = ext4_delete_entry(handle, dir, de, bh); brelse(bh); } return retval; } static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent, int force_reread) { int retval; /* * ent->de could have moved from under us during htree split, so make * sure that we are deleting the right entry. We might also be pointing * to a stale entry in the unused part of ent->bh so just checking inum * and the name isn't enough. */ if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino || ent->de->name_len != ent->dentry->d_name.len || strncmp(ent->de->name, ent->dentry->d_name.name, ent->de->name_len) || force_reread) { retval = ext4_find_delete_entry(handle, ent->dir, &ent->dentry->d_name); } else { retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh); if (retval == -ENOENT) { retval = ext4_find_delete_entry(handle, ent->dir, &ent->dentry->d_name); } } if (retval) { ext4_warning_inode(ent->dir, "Deleting old file: nlink %d, error=%d", ent->dir->i_nlink, retval); } } static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent) { if (ent->dir_nlink_delta) { if (ent->dir_nlink_delta == -1) ext4_dec_count(ent->dir); else ext4_inc_count(ent->dir); ext4_mark_inode_dirty(handle, ent->dir); } } static struct inode *ext4_whiteout_for_rename(struct mnt_idmap *idmap, struct ext4_renament *ent, int credits, handle_t **h) { struct inode *wh; handle_t *handle; int retries = 0; /* * for inode block, sb block, group summaries, * and inode bitmap */ credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) + EXT4_XATTR_TRANS_BLOCKS + 4); retry: wh = ext4_new_inode_start_handle(idmap, ent->dir, S_IFCHR | WHITEOUT_MODE, &ent->dentry->d_name, 0, NULL, EXT4_HT_DIR, credits); handle = ext4_journal_current_handle(); if (IS_ERR(wh)) { if (handle) ext4_journal_stop(handle); if (PTR_ERR(wh) == -ENOSPC && ext4_should_retry_alloc(ent->dir->i_sb, &retries)) goto retry; } else { *h = handle; init_special_inode(wh, wh->i_mode, WHITEOUT_DEV); wh->i_op = &ext4_special_inode_operations; } return wh; } /* * Anybody can rename anything with this: the permission checks are left to the * higher-level routines. * * n.b. 
old_{dentry,inode) refers to the source dentry/inode * while new_{dentry,inode) refers to the destination dentry/inode * This comes from rename(const char *oldpath, const char *newpath) */ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { handle_t *handle = NULL; struct ext4_renament old = { .dir = old_dir, .dentry = old_dentry, .inode = d_inode(old_dentry), }; struct ext4_renament new = { .dir = new_dir, .dentry = new_dentry, .inode = d_inode(new_dentry), }; int force_reread; int retval; struct inode *whiteout = NULL; int credits; u8 old_file_type; if (new.inode && new.inode->i_nlink == 0) { EXT4_ERROR_INODE(new.inode, "target of rename is already freed"); return -EFSCORRUPTED; } if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; retval = dquot_initialize(old.dir); if (retval) return retval; retval = dquot_initialize(old.inode); if (retval) return retval; retval = dquot_initialize(new.dir); if (retval) return retval; /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new.inode) { retval = dquot_initialize(new.inode); if (retval) return retval; } old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, &old.inlined); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. Goodbye sticky bit ;-< */ retval = -ENOENT; if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) goto release_bh; new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, &new.de, &new.inlined); if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; goto release_bh; } if (new.bh) { if (!new.inode) { brelse(new.bh); new.bh = NULL; } } if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC)) ext4_alloc_da_blocks(old.inode); credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); if (!(flags & RENAME_WHITEOUT)) { handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); if (IS_ERR(handle)) { retval = PTR_ERR(handle); goto release_bh; } } else { whiteout = ext4_whiteout_for_rename(idmap, &old, credits, &handle); if (IS_ERR(whiteout)) { retval = PTR_ERR(whiteout); goto release_bh; } } old_file_type = old.de->file_type; if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); if (S_ISDIR(old.inode->i_mode)) { if (new.inode) { retval = -ENOTEMPTY; if (!ext4_empty_dir(new.inode)) goto end_rename; } else { retval = -EMLINK; if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir)) goto end_rename; } retval = ext4_rename_dir_prepare(handle, &old, new.dir != old.dir); if (retval) goto end_rename; } /* * If we're renaming a file within an inline_data dir and adding or * setting the new dirent causes a conversion from inline_data to * extents/blockmap, we need to force the dirent delete code to * re-read the directory, or else we end up trying to delete a dirent * from what is now the extent tree root (or a block map). */ force_reread = (new.dir->i_ino == old.dir->i_ino && ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); if (whiteout) { /* * Do this before adding a new entry, so the old entry is sure * to be still pointing to the valid old entry. 
*/ retval = ext4_setent(handle, &old, whiteout->i_ino, EXT4_FT_CHRDEV); if (retval) goto end_rename; retval = ext4_mark_inode_dirty(handle, whiteout); if (unlikely(retval)) goto end_rename; } if (!new.bh) { retval = ext4_add_entry(handle, new.dentry, old.inode); if (retval) goto end_rename; } else { retval = ext4_setent(handle, &new, old.inode->i_ino, old_file_type); if (retval) goto end_rename; } if (force_reread) force_reread = !ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA); /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ inode_set_ctime_current(old.inode); retval = ext4_mark_inode_dirty(handle, old.inode); if (unlikely(retval)) goto end_rename; if (!whiteout) { /* * ok, that's it */ ext4_rename_delete(handle, &old, force_reread); } if (new.inode) { ext4_dec_count(new.inode); inode_set_ctime_current(new.inode); } inode_set_mtime_to_ts(old.dir, inode_set_ctime_current(old.dir)); ext4_update_dx_flag(old.dir); if (old.is_dir) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); if (retval) goto end_rename; ext4_dec_count(old.dir); if (new.inode) { /* checked ext4_empty_dir above, can't have another * parent, ext4_dec_count() won't work for many-linked * dirs */ clear_nlink(new.inode); } else { ext4_inc_count(new.dir); ext4_update_dx_flag(new.dir); retval = ext4_mark_inode_dirty(handle, new.dir); if (unlikely(retval)) goto end_rename; } } retval = ext4_mark_inode_dirty(handle, old.dir); if (unlikely(retval)) goto end_rename; if (old.is_dir) { /* * We disable fast commits here that's because the * replay code is not yet capable of changing dot dot * dirents in directories. */ ext4_fc_mark_ineligible(old.inode->i_sb, EXT4_FC_REASON_RENAME_DIR, handle); } else { struct super_block *sb = old.inode->i_sb; if (new.inode) ext4_fc_track_unlink(handle, new.dentry); if (test_opt2(sb, JOURNAL_FAST_COMMIT) && !(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) && !(ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE))) { __ext4_fc_track_link(handle, old.inode, new.dentry); __ext4_fc_track_unlink(handle, old.inode, old.dentry); if (whiteout) __ext4_fc_track_create(handle, whiteout, old.dentry); } } if (new.inode) { retval = ext4_mark_inode_dirty(handle, new.inode); if (unlikely(retval)) goto end_rename; if (!new.inode->i_nlink) ext4_orphan_add(handle, new.inode); } retval = 0; end_rename: if (whiteout) { if (retval) { ext4_resetent(handle, &old, old.inode->i_ino, old_file_type); drop_nlink(whiteout); ext4_mark_inode_dirty(handle, whiteout); ext4_orphan_add(handle, whiteout); } unlock_new_inode(whiteout); ext4_journal_stop(handle); iput(whiteout); } else { ext4_journal_stop(handle); } release_bh: brelse(old.dir_bh); brelse(old.bh); brelse(new.bh); return retval; } static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { handle_t *handle = NULL; struct ext4_renament old = { .dir = old_dir, .dentry = old_dentry, .inode = d_inode(old_dentry), }; struct ext4_renament new = { .dir = new_dir, .dentry = new_dentry, .inode = d_inode(new_dentry), }; u8 new_file_type; int retval; if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) && !projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid)) || (ext4_test_inode_flag(old_dir, EXT4_INODE_PROJINHERIT) && !projid_eq(EXT4_I(old_dir)->i_projid, EXT4_I(new_dentry->d_inode)->i_projid))) return -EXDEV; retval = dquot_initialize(old.dir); if (retval) return retval; retval = dquot_initialize(new.dir); if (retval) return retval; old.bh = 
ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, &old.inlined); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. Goodbye sticky bit ;-< */ retval = -ENOENT; if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) goto end_rename; new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, &new.de, &new.inlined); if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; goto end_rename; } /* RENAME_EXCHANGE case: old *and* new must both exist */ if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino) goto end_rename; handle = ext4_journal_start(old.dir, EXT4_HT_DIR, (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); handle = NULL; goto end_rename; } if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); if (S_ISDIR(old.inode->i_mode)) { retval = ext4_rename_dir_prepare(handle, &old, new.dir != old.dir); if (retval) goto end_rename; } if (S_ISDIR(new.inode->i_mode)) { retval = ext4_rename_dir_prepare(handle, &new, new.dir != old.dir); if (retval) goto end_rename; } /* * Other than the special case of overwriting a directory, parents' * nlink only needs to be modified if this is a cross directory rename. */ if (old.dir != new.dir && old.is_dir != new.is_dir) { old.dir_nlink_delta = old.is_dir ? -1 : 1; new.dir_nlink_delta = -old.dir_nlink_delta; retval = -EMLINK; if ((old.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(old.dir)) || (new.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(new.dir))) goto end_rename; } new_file_type = new.de->file_type; retval = ext4_setent(handle, &new, old.inode->i_ino, old.de->file_type); if (retval) goto end_rename; retval = ext4_setent(handle, &old, new.inode->i_ino, new_file_type); if (retval) goto end_rename; /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ inode_set_ctime_current(old.inode); inode_set_ctime_current(new.inode); retval = ext4_mark_inode_dirty(handle, old.inode); if (unlikely(retval)) goto end_rename; retval = ext4_mark_inode_dirty(handle, new.inode); if (unlikely(retval)) goto end_rename; ext4_fc_mark_ineligible(new.inode->i_sb, EXT4_FC_REASON_CROSS_RENAME, handle); if (old.dir_bh) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); if (retval) goto end_rename; } if (new.dir_bh) { retval = ext4_rename_dir_finish(handle, &new, old.dir->i_ino); if (retval) goto end_rename; } ext4_update_dir_count(handle, &old); ext4_update_dir_count(handle, &new); retval = 0; end_rename: brelse(old.dir_bh); brelse(new.dir_bh); brelse(old.bh); brelse(new.bh); if (handle) ext4_journal_stop(handle); return retval; } static int ext4_rename2(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { int err; if (unlikely(ext4_forced_shutdown(old_dir->i_sb))) return -EIO; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (err) return err; if (flags & RENAME_EXCHANGE) { return ext4_cross_rename(old_dir, old_dentry, new_dir, new_dentry); } return ext4_rename(idmap, old_dir, old_dentry, new_dir, new_dentry, flags); } /* * directories can handle most operations... 
*/ const struct inode_operations ext4_dir_inode_operations = { .create = ext4_create, .lookup = ext4_lookup, .link = ext4_link, .unlink = ext4_unlink, .symlink = ext4_symlink, .mkdir = ext4_mkdir, .rmdir = ext4_rmdir, .mknod = ext4_mknod, .tmpfile = ext4_tmpfile, .rename = ext4_rename2, .setattr = ext4_setattr, .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_inode_acl = ext4_get_acl, .set_acl = ext4_set_acl, .fiemap = ext4_fiemap, .fileattr_get = ext4_fileattr_get, .fileattr_set = ext4_fileattr_set, }; const struct inode_operations ext4_special_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_inode_acl = ext4_get_acl, .set_acl = ext4_set_acl, };
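The ext4_rename2() entry point above dispatches on the renameat2(2) flags: RENAME_EXCHANGE is routed to ext4_cross_rename(), while a plain rename, RENAME_NOREPLACE or RENAME_WHITEOUT goes through ext4_rename(). The userspace fragment below is only an illustrative sketch of exercising the exchange path from the syscall side; it is not part of the dumped sources, and the fallback flag definitions are provided in case the libc headers predate them.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE	(1 << 0)
#endif
#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE		(1 << 1)
#endif
#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT		(1 << 2)
#endif

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s OLD NEW\n", argv[0]);
		return 1;
	}

	/*
	 * Atomically swap the two names.  On ext4 this request reaches
	 * ext4_rename2(), which hands RENAME_EXCHANGE to ext4_cross_rename();
	 * a plain rename or RENAME_WHITEOUT would go through ext4_rename().
	 */
	if (syscall(SYS_renameat2, AT_FDCWD, argv[1],
		    AT_FDCWD, argv[2], RENAME_EXCHANGE) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}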
5 4 5 1 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 1 1 1 1 1 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 // SPDX-License-Identifier: GPL-2.0-or-later /* * Abilis Systems Single DVB-T Receiver * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com> * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/usb.h> #include "as102_drv.h" #include "as102_usb_drv.h" #include "as102_fw.h" static void as102_usb_disconnect(struct usb_interface *interface); static int as102_usb_probe(struct usb_interface *interface, const struct usb_device_id *id); static int as102_usb_start_stream(struct as102_dev_t *dev); static void as102_usb_stop_stream(struct as102_dev_t *dev); static int as102_open(struct inode *inode, struct file *file); static int as102_release(struct inode *inode, struct file *file); static const struct usb_device_id as102_usb_id_table[] = { { USB_DEVICE(AS102_USB_DEVICE_VENDOR_ID, AS102_USB_DEVICE_PID_0001) }, { USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) }, { USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) }, { USB_DEVICE(NBOX_DVBT_DONGLE_USB_VID, NBOX_DVBT_DONGLE_USB_PID) }, { USB_DEVICE(SKY_IT_DIGITAL_KEY_USB_VID, SKY_IT_DIGITAL_KEY_USB_PID) }, { } /* Terminating entry */ }; /* Note that this table must always have the same number of entries as the as102_usb_id_table struct */ static const char * const as102_device_names[] = { AS102_REFERENCE_DESIGN, AS102_PCTV_74E, AS102_ELGATO_EYETV_DTT_NAME, AS102_NBOX_DVBT_DONGLE_NAME, AS102_SKY_IT_DIGITAL_KEY_NAME, NULL /* Terminating entry */ }; /* eLNA configuration: devices built on the reference design work best with 0xA0, while custom designs seem to require 0xC0 */ static uint8_t const as102_elna_cfg[] = { 0xA0, 0xC0, 0xC0, 
0xA0, 0xA0, 0x00 /* Terminating entry */ }; struct usb_driver as102_usb_driver = { .name = DRIVER_FULL_NAME, .probe = as102_usb_probe, .disconnect = as102_usb_disconnect, .id_table = as102_usb_id_table }; static const struct file_operations as102_dev_fops = { .owner = THIS_MODULE, .open = as102_open, .release = as102_release, }; static struct usb_class_driver as102_usb_class_driver = { .name = "aton2-%d", .fops = &as102_dev_fops, .minor_base = AS102_DEVICE_MAJOR, }; static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap, unsigned char *send_buf, int send_buf_len, unsigned char *recv_buf, int recv_buf_len) { int ret = 0; if (send_buf != NULL) { ret = usb_control_msg(bus_adap->usb_dev, usb_sndctrlpipe(bus_adap->usb_dev, 0), AS102_USB_DEVICE_TX_CTRL_CMD, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, bus_adap->cmd_xid, /* value */ 0, /* index */ send_buf, send_buf_len, USB_CTRL_SET_TIMEOUT /* 200 */); if (ret < 0) { dev_dbg(&bus_adap->usb_dev->dev, "usb_control_msg(send) failed, err %i\n", ret); return ret; } if (ret != send_buf_len) { dev_dbg(&bus_adap->usb_dev->dev, "only wrote %d of %d bytes\n", ret, send_buf_len); return -1; } } if (recv_buf != NULL) { #ifdef TRACE dev_dbg(bus_adap->usb_dev->dev, "want to read: %d bytes\n", recv_buf_len); #endif ret = usb_control_msg(bus_adap->usb_dev, usb_rcvctrlpipe(bus_adap->usb_dev, 0), AS102_USB_DEVICE_RX_CTRL_CMD, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, bus_adap->cmd_xid, /* value */ 0, /* index */ recv_buf, recv_buf_len, USB_CTRL_GET_TIMEOUT /* 200 */); if (ret < 0) { dev_dbg(&bus_adap->usb_dev->dev, "usb_control_msg(recv) failed, err %i\n", ret); return ret; } #ifdef TRACE dev_dbg(bus_adap->usb_dev->dev, "read %d bytes\n", recv_buf_len); #endif } return ret; } static int as102_send_ep1(struct as10x_bus_adapter_t *bus_adap, unsigned char *send_buf, int send_buf_len, int swap32) { int ret, actual_len; ret = usb_bulk_msg(bus_adap->usb_dev, usb_sndbulkpipe(bus_adap->usb_dev, 1), send_buf, send_buf_len, &actual_len, 200); if (ret) { dev_dbg(&bus_adap->usb_dev->dev, "usb_bulk_msg(send) failed, err %i\n", ret); return ret; } if (actual_len != send_buf_len) { dev_dbg(&bus_adap->usb_dev->dev, "only wrote %d of %d bytes\n", actual_len, send_buf_len); return -1; } return actual_len; } static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap, unsigned char *recv_buf, int recv_buf_len) { int ret, actual_len; if (recv_buf == NULL) return -EINVAL; ret = usb_bulk_msg(bus_adap->usb_dev, usb_rcvbulkpipe(bus_adap->usb_dev, 2), recv_buf, recv_buf_len, &actual_len, 200); if (ret) { dev_dbg(&bus_adap->usb_dev->dev, "usb_bulk_msg(recv) failed, err %i\n", ret); return ret; } if (actual_len != recv_buf_len) { dev_dbg(&bus_adap->usb_dev->dev, "only read %d of %d bytes\n", actual_len, recv_buf_len); return -1; } return actual_len; } static const struct as102_priv_ops_t as102_priv_ops = { .upload_fw_pkt = as102_send_ep1, .xfer_cmd = as102_usb_xfer_cmd, .as102_read_ep2 = as102_read_ep2, .start_stream = as102_usb_start_stream, .stop_stream = as102_usb_stop_stream, }; static int as102_submit_urb_stream(struct as102_dev_t *dev, struct urb *urb) { int err; usb_fill_bulk_urb(urb, dev->bus_adap.usb_dev, usb_rcvbulkpipe(dev->bus_adap.usb_dev, 0x2), urb->transfer_buffer, AS102_USB_BUF_SIZE, as102_urb_stream_irq, dev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dev_dbg(&urb->dev->dev, "%s: usb_submit_urb failed\n", __func__); return err; } void as102_urb_stream_irq(struct urb *urb) { struct as102_dev_t *as102_dev = urb->context; if 
(urb->actual_length > 0) { dvb_dmx_swfilter(&as102_dev->dvb_dmx, urb->transfer_buffer, urb->actual_length); } else { if (urb->actual_length == 0) memset(urb->transfer_buffer, 0, AS102_USB_BUF_SIZE); } /* is not stopped, re-submit urb */ if (as102_dev->streaming) as102_submit_urb_stream(as102_dev, urb); } static void as102_free_usb_stream_buffer(struct as102_dev_t *dev) { int i; for (i = 0; i < MAX_STREAM_URB; i++) usb_free_urb(dev->stream_urb[i]); usb_free_coherent(dev->bus_adap.usb_dev, MAX_STREAM_URB * AS102_USB_BUF_SIZE, dev->stream, dev->dma_addr); } static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev) { int i; dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev, MAX_STREAM_URB * AS102_USB_BUF_SIZE, GFP_KERNEL, &dev->dma_addr); if (!dev->stream) { dev_dbg(&dev->bus_adap.usb_dev->dev, "%s: usb_buffer_alloc failed\n", __func__); return -ENOMEM; } memset(dev->stream, 0, MAX_STREAM_URB * AS102_USB_BUF_SIZE); /* init urb buffers */ for (i = 0; i < MAX_STREAM_URB; i++) { struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { as102_free_usb_stream_buffer(dev); return -ENOMEM; } urb->transfer_buffer = dev->stream + (i * AS102_USB_BUF_SIZE); urb->transfer_dma = dev->dma_addr + (i * AS102_USB_BUF_SIZE); urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer_length = AS102_USB_BUF_SIZE; dev->stream_urb[i] = urb; } return 0; } static void as102_usb_stop_stream(struct as102_dev_t *dev) { int i; for (i = 0; i < MAX_STREAM_URB; i++) usb_kill_urb(dev->stream_urb[i]); } static int as102_usb_start_stream(struct as102_dev_t *dev) { int i, ret = 0; for (i = 0; i < MAX_STREAM_URB; i++) { ret = as102_submit_urb_stream(dev, dev->stream_urb[i]); if (ret) { as102_usb_stop_stream(dev); return ret; } } return 0; } static void as102_usb_release(struct kref *kref) { struct as102_dev_t *as102_dev; as102_dev = container_of(kref, struct as102_dev_t, kref); usb_put_dev(as102_dev->bus_adap.usb_dev); kfree(as102_dev); } static void as102_usb_disconnect(struct usb_interface *intf) { struct as102_dev_t *as102_dev; /* extract as102_dev_t from usb_device private data */ as102_dev = usb_get_intfdata(intf); /* unregister dvb layer */ as102_dvb_unregister(as102_dev); /* free usb buffers */ as102_free_usb_stream_buffer(as102_dev); usb_set_intfdata(intf, NULL); /* usb unregister device */ usb_deregister_dev(intf, &as102_usb_class_driver); /* decrement usage counter */ kref_put(&as102_dev->kref, as102_usb_release); pr_info("%s: device has been disconnected\n", DRIVER_NAME); } static int as102_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct as102_dev_t *as102_dev; int i; /* This should never actually happen */ if (ARRAY_SIZE(as102_usb_id_table) != (sizeof(as102_device_names) / sizeof(const char *))) { pr_err("Device names table invalid size"); return -EINVAL; } as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL); if (as102_dev == NULL) return -ENOMEM; /* Assign the user-friendly device name */ for (i = 0; i < ARRAY_SIZE(as102_usb_id_table); i++) { if (id == &as102_usb_id_table[i]) { as102_dev->name = as102_device_names[i]; as102_dev->elna_cfg = as102_elna_cfg[i]; } } if (as102_dev->name == NULL) as102_dev->name = "Unknown AS102 device"; /* set private callback functions */ as102_dev->bus_adap.ops = &as102_priv_ops; /* init cmd token for usb bus */ as102_dev->bus_adap.cmd = &as102_dev->bus_adap.token.usb.c; as102_dev->bus_adap.rsp = &as102_dev->bus_adap.token.usb.r; /* init kernel device reference */ kref_init(&as102_dev->kref); /* store 
as102 device to usb_device private data */ usb_set_intfdata(intf, (void *) as102_dev); /* store in as102 device the usb_device pointer */ as102_dev->bus_adap.usb_dev = usb_get_dev(interface_to_usbdev(intf)); /* we can register the device now, as it is ready */ ret = usb_register_dev(intf, &as102_usb_class_driver); if (ret < 0) { /* something prevented us from registering this driver */ dev_err(&intf->dev, "%s: usb_register_dev() failed (errno = %d)\n", __func__, ret); goto failed; } pr_info("%s: device has been detected\n", DRIVER_NAME); /* request buffer allocation for streaming */ ret = as102_alloc_usb_stream_buffer(as102_dev); if (ret != 0) goto failed_stream; /* register dvb layer */ ret = as102_dvb_register(as102_dev); if (ret != 0) goto failed_dvb; return ret; failed_dvb: as102_free_usb_stream_buffer(as102_dev); failed_stream: usb_deregister_dev(intf, &as102_usb_class_driver); failed: usb_put_dev(as102_dev->bus_adap.usb_dev); usb_set_intfdata(intf, NULL); kfree(as102_dev); return ret; } static int as102_open(struct inode *inode, struct file *file) { int ret = 0, minor = 0; struct usb_interface *intf = NULL; struct as102_dev_t *dev = NULL; /* read minor from inode */ minor = iminor(inode); /* fetch device from usb interface */ intf = usb_find_interface(&as102_usb_driver, minor); if (intf == NULL) { pr_err("%s: can't find device for minor %d\n", __func__, minor); ret = -ENODEV; goto exit; } /* get our device */ dev = usb_get_intfdata(intf); if (dev == NULL) { ret = -EFAULT; goto exit; } /* save our device object in the file's private structure */ file->private_data = dev; /* increment our usage count for the device */ kref_get(&dev->kref); exit: return ret; } static int as102_release(struct inode *inode, struct file *file) { struct as102_dev_t *dev = NULL; dev = file->private_data; if (dev != NULL) { /* decrement the count on our device */ kref_put(&dev->kref, as102_usb_release); } return 0; } MODULE_DEVICE_TABLE(usb, as102_usb_id_table);
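as102_usb_probe() above recovers the index of the matched usb_device_id entry by comparing the id pointer against each slot of as102_usb_id_table, then uses the same index into the parallel as102_device_names and as102_elna_cfg tables, which is why the comment requires both tables to keep the same number of entries. The standalone sketch below shows that pointer-to-index pattern in isolation; the IDs and names in it are made up and nothing here comes from the real driver.

#include <stddef.h>
#include <stdio.h>

struct sample_id {
	unsigned short vendor;
	unsigned short product;
};

/* Hypothetical IDs, standing in for as102_usb_id_table[]. */
static const struct sample_id sample_id_table[] = {
	{ 0x1234, 0x0001 },
	{ 0x1234, 0x0002 },
	{ }			/* terminating entry */
};

/* Must stay index-aligned with sample_id_table, like as102_device_names[]. */
static const char * const sample_names[] = {
	"reference design",
	"custom design",
	NULL
};

static const char *name_for(const struct sample_id *match)
{
	size_t i;

	/* Same trick as the probe routine: recover the index by address. */
	for (i = 0; i < sizeof(sample_id_table) / sizeof(sample_id_table[0]); i++)
		if (match == &sample_id_table[i])
			return sample_names[i] ? sample_names[i] : "unknown";
	return "unknown";
}

int main(void)
{
	printf("%s\n", name_for(&sample_id_table[1]));	/* prints "custom design" */
	return 0;
}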
4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 // SPDX-License-Identifier: GPL-2.0-only /* * drivers/mfd/mfd-core.c * * core MFD support * Copyright (c) 2006 Ian Molton * Copyright (c) 2007,2008 Dmitry Baryshkov */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/list.h> #include <linux/property.h> #include <linux/mfd/core.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/regulator/consumer.h> static LIST_HEAD(mfd_of_node_list); struct mfd_of_node_entry { struct list_head list; struct device *dev; struct device_node *np; }; static const struct device_type mfd_dev_type = { .name = "mfd_device", }; #if IS_ENABLED(CONFIG_ACPI) struct match_ids_walk_data { struct acpi_device_id *ids; struct acpi_device *adev; }; static int match_device_ids(struct acpi_device *adev, void *data) { struct match_ids_walk_data *wd = data; if (!acpi_match_device_ids(adev, wd->ids)) { wd->adev = adev; return 1; } return 0; } static void mfd_acpi_add_device(const struct mfd_cell *cell, struct platform_device *pdev) { const struct mfd_cell_acpi_match *match = cell->acpi_match; struct acpi_device *adev = NULL; struct acpi_device *parent; parent = ACPI_COMPANION(pdev->dev.parent); if (!parent) return; /* * MFD child device gets its ACPI handle either from the ACPI device * directly under the parent that matches the either _HID or _CID, or * _ADR or it will use the parent handle if is no ID is given. * * Note that use of _ADR is a grey area in the ACPI specification, * though at least Intel Galileo Gen 2 is using it to distinguish * the children devices. 
*/ if (match) { if (match->pnpid) { struct acpi_device_id ids[2] = {}; struct match_ids_walk_data wd = { .adev = NULL, .ids = ids, }; strscpy(ids[0].id, match->pnpid, sizeof(ids[0].id)); acpi_dev_for_each_child(parent, match_device_ids, &wd); adev = wd.adev; } else { adev = acpi_find_child_device(parent, match->adr, false); } } device_set_node(&pdev->dev, acpi_fwnode_handle(adev ?: parent)); } #else static inline void mfd_acpi_add_device(const struct mfd_cell *cell, struct platform_device *pdev) { } #endif static int mfd_match_of_node_to_dev(struct platform_device *pdev, struct device_node *np, const struct mfd_cell *cell) { #if IS_ENABLED(CONFIG_OF) struct mfd_of_node_entry *of_entry; u64 of_node_addr; /* Skip if OF node has previously been allocated to a device */ list_for_each_entry(of_entry, &mfd_of_node_list, list) if (of_entry->np == np) return -EAGAIN; if (!cell->use_of_reg) /* No of_reg defined - allocate first free compatible match */ goto allocate_of_node; /* We only care about each node's first defined address */ if (of_property_read_reg(np, 0, &of_node_addr, NULL)) /* OF node does not contatin a 'reg' property to match to */ return -EAGAIN; if (cell->of_reg != of_node_addr) /* No match */ return -EAGAIN; allocate_of_node: of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL); if (!of_entry) return -ENOMEM; of_entry->dev = &pdev->dev; of_entry->np = np; list_add_tail(&of_entry->list, &mfd_of_node_list); device_set_node(&pdev->dev, of_fwnode_handle(np)); #endif return 0; } static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, int irq_base, struct irq_domain *domain) { struct resource *res; struct platform_device *pdev; struct device_node *np = NULL; struct mfd_of_node_entry *of_entry, *tmp; bool disabled = false; int ret = -ENOMEM; int platform_id; int r; if (id == PLATFORM_DEVID_AUTO) platform_id = id; else platform_id = id + cell->id; pdev = platform_device_alloc(cell->name, platform_id); if (!pdev) goto fail_alloc; pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL); if (!pdev->mfd_cell) goto fail_device; res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL); if (!res) goto fail_device; pdev->dev.parent = parent; pdev->dev.type = &mfd_dev_type; pdev->dev.dma_mask = parent->dma_mask; pdev->dev.dma_parms = parent->dma_parms; pdev->dev.coherent_dma_mask = parent->coherent_dma_mask; ret = regulator_bulk_register_supply_alias( &pdev->dev, cell->parent_supplies, parent, cell->parent_supplies, cell->num_parent_supplies); if (ret < 0) goto fail_res; if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) { for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { /* Skip 'disabled' devices */ if (!of_device_is_available(np)) { disabled = true; continue; } ret = mfd_match_of_node_to_dev(pdev, np, cell); if (ret == -EAGAIN) continue; of_node_put(np); if (ret) goto fail_alias; goto match; } } if (disabled) { /* Ignore 'disabled' devices error free */ ret = 0; goto fail_alias; } match: if (!pdev->dev.of_node) pr_warn("%s: Failed to locate of_node [id: %d]\n", cell->name, platform_id); } mfd_acpi_add_device(cell, pdev); if (cell->pdata_size) { ret = platform_device_add_data(pdev, cell->platform_data, cell->pdata_size); if (ret) goto fail_of_entry; } if (cell->swnode) { ret = device_add_software_node(&pdev->dev, cell->swnode); if (ret) goto fail_of_entry; } for (r = 0; r < cell->num_resources; r++) { res[r].name = cell->resources[r].name; res[r].flags = 
cell->resources[r].flags; /* Find out base to use */ if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) { res[r].parent = mem_base; res[r].start = mem_base->start + cell->resources[r].start; res[r].end = mem_base->start + cell->resources[r].end; } else if (cell->resources[r].flags & IORESOURCE_IRQ) { if (domain) { /* Unable to create mappings for IRQ ranges. */ WARN_ON(cell->resources[r].start != cell->resources[r].end); res[r].start = res[r].end = irq_create_mapping( domain, cell->resources[r].start); } else { res[r].start = irq_base + cell->resources[r].start; res[r].end = irq_base + cell->resources[r].end; } } else { res[r].parent = cell->resources[r].parent; res[r].start = cell->resources[r].start; res[r].end = cell->resources[r].end; } if (!cell->ignore_resource_conflicts) { if (has_acpi_companion(&pdev->dev)) { ret = acpi_check_resource_conflict(&res[r]); if (ret) goto fail_res_conflict; } } } ret = platform_device_add_resources(pdev, res, cell->num_resources); if (ret) goto fail_res_conflict; ret = platform_device_add(pdev); if (ret) goto fail_res_conflict; if (cell->pm_runtime_no_callbacks) pm_runtime_no_callbacks(&pdev->dev); kfree(res); return 0; fail_res_conflict: if (cell->swnode) device_remove_software_node(&pdev->dev); fail_of_entry: list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list) if (of_entry->dev == &pdev->dev) { list_del(&of_entry->list); kfree(of_entry); } fail_alias: regulator_bulk_unregister_supply_alias(&pdev->dev, cell->parent_supplies, cell->num_parent_supplies); fail_res: kfree(res); fail_device: platform_device_put(pdev); fail_alloc: return ret; } /** * mfd_add_devices - register child devices * * @parent: Pointer to parent device. * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care * of device numbering, or will be added to a device's cell_id. * @cells: Array of (struct mfd_cell)s describing child devices. * @n_devs: Number of child devices to register. * @mem_base: Parent register range resource for child devices. * @irq_base: Base of the range of virtual interrupt numbers allocated for * this MFD device. Unused if @domain is specified. * @domain: Interrupt domain to create mappings for hardware interrupts. 
*/ int mfd_add_devices(struct device *parent, int id, const struct mfd_cell *cells, int n_devs, struct resource *mem_base, int irq_base, struct irq_domain *domain) { int i; int ret; for (i = 0; i < n_devs; i++) { ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base, domain); if (ret) goto fail; } return 0; fail: if (i) mfd_remove_devices(parent); return ret; } EXPORT_SYMBOL(mfd_add_devices); static int mfd_remove_devices_fn(struct device *dev, void *data) { struct platform_device *pdev; const struct mfd_cell *cell; struct mfd_of_node_entry *of_entry, *tmp; int *level = data; if (dev->type != &mfd_dev_type) return 0; pdev = to_platform_device(dev); cell = mfd_get_cell(pdev); if (level && cell->level > *level) return 0; if (cell->swnode) device_remove_software_node(&pdev->dev); list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list) if (of_entry->dev == &pdev->dev) { list_del(&of_entry->list); kfree(of_entry); } regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); platform_device_unregister(pdev); return 0; } void mfd_remove_devices_late(struct device *parent) { int level = MFD_DEP_LEVEL_HIGH; device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); } EXPORT_SYMBOL(mfd_remove_devices_late); void mfd_remove_devices(struct device *parent) { int level = MFD_DEP_LEVEL_NORMAL; device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); } EXPORT_SYMBOL(mfd_remove_devices); static void devm_mfd_dev_release(struct device *dev, void *res) { mfd_remove_devices(dev); } /** * devm_mfd_add_devices - Resource managed version of mfd_add_devices() * * Returns 0 on success or an appropriate negative error number on failure. * All child-devices of the MFD will automatically be removed when it gets * unbinded. * * @dev: Pointer to parent device. * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care * of device numbering, or will be added to a device's cell_id. * @cells: Array of (struct mfd_cell)s describing child devices. * @n_devs: Number of child devices to register. * @mem_base: Parent register range resource for child devices. * @irq_base: Base of the range of virtual interrupt numbers allocated for * this MFD device. Unused if @domain is specified. * @domain: Interrupt domain to create mappings for hardware interrupts. */ int devm_mfd_add_devices(struct device *dev, int id, const struct mfd_cell *cells, int n_devs, struct resource *mem_base, int irq_base, struct irq_domain *domain) { struct device **ptr; int ret; ptr = devres_alloc(devm_mfd_dev_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = mfd_add_devices(dev, id, cells, n_devs, mem_base, irq_base, domain); if (ret < 0) { devres_free(ptr); return ret; } *ptr = dev; devres_add(dev, ptr); return ret; } EXPORT_SYMBOL(devm_mfd_add_devices); MODULE_DESCRIPTION("Core MFD support"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
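mfd_add_devices() and devm_mfd_add_devices() above are intended to be called from a parent ("core") driver's probe routine with a static array of struct mfd_cell describing the children. The fragment below is a hedged sketch of such a caller under invented names: the "foo" driver, the "acme,foo-adc" compatible and the register offsets are hypothetical, and only cell fields the code above actually consumes (name, of_compatible, resources, num_resources) are filled in.

#include <linux/ioport.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Register window and interrupt of one child, relative to the parent. */
static const struct resource foo_adc_resources[] = {
	DEFINE_RES_MEM(0x100, 0x40),	/* rebased onto mem_base by mfd_add_device() */
	DEFINE_RES_IRQ(3),		/* added to irq_base (no IRQ domain passed) */
};

static const struct mfd_cell foo_cells[] = {
	{
		.name		= "foo-adc",
		.of_compatible	= "acme,foo-adc",
		.resources	= foo_adc_resources,
		.num_resources	= ARRAY_SIZE(foo_adc_resources),
	},
	{
		.name		= "foo-rtc",
	},
};

static int foo_mfd_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Children are removed automatically when the parent unbinds. */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
				    foo_cells, ARRAY_SIZE(foo_cells),
				    mem, 0, NULL);
}

static struct platform_driver foo_mfd_driver = {
	.driver	= { .name = "foo-mfd" },
	.probe	= foo_mfd_probe,
};
module_platform_driver(foo_mfd_driver);

MODULE_DESCRIPTION("Illustrative MFD parent driver sketch");
MODULE_LICENSE("GPL");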
1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGEMAP_H #define _LINUX_PAGEMAP_H /* * Copyright 1995 Linus Torvalds */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/compiler.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> #include <linux/hardirq.h> /* for in_interrupt() */ #include <linux/hugetlb_inline.h> struct folio_batch; unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); static inline void invalidate_remote_inode(struct inode *inode) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) invalidate_mapping_pages(inode->i_mapping, 0, -1); } int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count); int filemap_invalidate_pages(struct address_space *mapping, loff_t pos, loff_t end, bool nowait); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); int filemap_flush(struct address_space *); int filemap_fdatawait_keep_errors(struct address_space *mapping); int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte); int filemap_invalidate_inode(struct inode *inode, bool flush, loff_t start, loff_t end); static inline int filemap_fdatawait(struct address_space *mapping) { return filemap_fdatawait_range(mapping, 0, LLONG_MAX); } bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); int filemap_check_errors(struct address_space *mapping); void __filemap_set_wb_err(struct address_space *mapping, int err); int filemap_fdatawrite_wbc(struct address_space *mapping, struct writeback_control *wbc); int kiocb_write_and_wait(struct kiocb *iocb, size_t count); static inline int filemap_write_and_wait(struct address_space *mapping) { return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); } /** * filemap_set_wb_err - set a writeback error on an address_space * @mapping: mapping in which to set writeback error * @err: error to be set in mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred. * * When a writeback error occurs, most filesystems will want to call * filemap_set_wb_err to record the error in the mapping so that it will be * automatically reported whenever fsync is called on the file. */ static inline void filemap_set_wb_err(struct address_space *mapping, int err) { /* Fastpath for common case of no error */ if (unlikely(err)) __filemap_set_wb_err(mapping, err); } /** * filemap_check_wb_err - has an error occurred since the mark was sampled? 
* @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * * Grab the errseq_t value from the mapping, and see if it has changed "since" * the given value was sampled. * * If it has then report the latest error set, otherwise return 0. */ static inline int filemap_check_wb_err(struct address_space *mapping, errseq_t since) { return errseq_check(&mapping->wb_err, since); } /** * filemap_sample_wb_err - sample the current errseq_t to test for later errors * @mapping: mapping to be sampled * * Writeback errors are always reported relative to a particular sample point * in the past. This function provides those sample points. */ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) { return errseq_sample(&mapping->wb_err); } /** * file_sample_sb_err - sample the current errseq_t to test for later errors * @file: file pointer to be sampled * * Grab the most current superblock-level errseq_t value for the given * struct file. */ static inline errseq_t file_sample_sb_err(struct file *file) { return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); } /* * Flush file data before changing attributes. Caller must hold any locks * required to prevent further writes to this file until we're done setting * flags. */ static inline int inode_drain_writes(struct inode *inode) { inode_dio_wait(inode); return filemap_write_and_wait(inode->i_mapping); } static inline bool mapping_empty(struct address_space *mapping) { return xa_empty(&mapping->i_pages); } /* * mapping_shrinkable - test if page cache state allows inode reclaim * @mapping: the page cache mapping * * This checks the mapping's cache state for the pupose of inode * reclaim and LRU management. * * The caller is expected to hold the i_lock, but is not required to * hold the i_pages lock, which usually protects cache state. That's * because the i_lock and the list_lru lock that protect the inode and * its LRU state don't nest inside the irq-safe i_pages lock. * * Cache deletions are performed under the i_lock, which ensures that * when an inode goes empty, it will reliably get queued on the LRU. * * Cache additions do not acquire the i_lock and may race with this * check, in which case we'll report the inode as shrinkable when it * has cache pages. This is okay: the shrinker also checks the * refcount and the referenced bit, which will be elevated or set in * the process of adding new cache pages to an inode. */ static inline bool mapping_shrinkable(struct address_space *mapping) { void *head; /* * On highmem systems, there could be lowmem pressure from the * inodes before there is highmem pressure from the page * cache. Make inodes shrinkable regardless of cache state. */ if (IS_ENABLED(CONFIG_HIGHMEM)) return true; /* Cache completely empty? Shrink away. */ head = rcu_access_pointer(mapping->i_pages.xa_head); if (!head) return true; /* * The xarray stores single offset-0 entries directly in the * head pointer, which allows non-resident page cache entries * to escape the shadow shrinker's list of xarray nodes. The * inode shrinker needs to pick them up under memory pressure. */ if (!xa_is_node(head) && xa_is_value(head)) return true; return false; } /* * Bits in mapping->flags. 
*/ enum mapping_flags { AS_EIO = 0, /* IO error on async write */ AS_ENOSPC = 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying folio contents */ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */ /* Bits 16-25 are used for FOLIO_ORDER */ AS_FOLIO_ORDER_BITS = 5, AS_FOLIO_ORDER_MIN = 16, AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS, }; #define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1) #define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN) #define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX) #define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK) /** * mapping_set_error - record a writeback error in the address_space * @mapping: the mapping in which an error should be set * @error: the error to set in the mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred. * * When a writeback error occurs, most filesystems will want to call * mapping_set_error to record the error in the mapping so that it can be * reported when the application calls fsync(2). */ static inline void mapping_set_error(struct address_space *mapping, int error) { if (likely(!error)) return; /* Record in wb_err for checkers using errseq_t based tracking */ __filemap_set_wb_err(mapping, error); /* Record it in superblock */ if (mapping->host) errseq_set(&mapping->host->i_sb->s_wb_err, error); /* Record it in flags for now, for legacy callers */ if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else set_bit(AS_EIO, &mapping->flags); } static inline void mapping_set_unevictable(struct address_space *mapping) { set_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_clear_unevictable(struct address_space *mapping) { clear_bit(AS_UNEVICTABLE, &mapping->flags); } static inline bool mapping_unevictable(struct address_space *mapping) { return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); } static inline int mapping_exiting(struct address_space *mapping) { return test_bit(AS_EXITING, &mapping->flags); } static inline void mapping_set_no_writeback_tags(struct address_space *mapping) { set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline int mapping_use_writeback_tags(struct address_space *mapping) { return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline bool mapping_release_always(const struct address_space *mapping) { return test_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline void mapping_set_release_always(struct address_space *mapping) { set_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline void mapping_clear_release_always(struct address_space *mapping) { clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline bool mapping_stable_writes(const struct address_space *mapping) { return test_bit(AS_STABLE_WRITES, 
&mapping->flags); } static inline void mapping_set_stable_writes(struct address_space *mapping) { set_bit(AS_STABLE_WRITES, &mapping->flags); } static inline void mapping_clear_stable_writes(struct address_space *mapping) { clear_bit(AS_STABLE_WRITES, &mapping->flags); } static inline void mapping_set_inaccessible(struct address_space *mapping) { /* * It's expected inaccessible mappings are also unevictable. Compaction * migrate scanner (isolate_migratepages_block()) relies on this to * reduce page locking. */ set_bit(AS_UNEVICTABLE, &mapping->flags); set_bit(AS_INACCESSIBLE, &mapping->flags); } static inline bool mapping_inaccessible(struct address_space *mapping) { return test_bit(AS_INACCESSIBLE, &mapping->flags); } static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; } /* Restricts the given gfp_mask to what the mapping allows. */ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, gfp_t gfp_mask) { return mapping_gfp_mask(mapping) & gfp_mask; } /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... */ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) { m->gfp_mask = mask; } /* * There are some parts of the kernel which assume that PMD entries * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then, * limit the maximum allocation order to PMD size. I'm not aware of any * assumptions about maximum order if THP are disabled, but 8 seems like * a good order (that's 1MB if you're using 4kB pages) */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER #else #define PREFERRED_MAX_PAGECACHE_ORDER 8 #endif /* * xas_split_alloc() does not support arbitrary orders. This implies no * 512MB THP on ARM64 with 64KB base page size. */ #define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) #define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) /* * mapping_max_folio_size_supported() - Check the max folio size supported * * The filesystem should call this function at mount time if there is a * requirement on the folio mapping size in the page cache. */ static inline size_t mapping_max_folio_size_supported(void) { if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER); return PAGE_SIZE; } /* * mapping_set_folio_order_range() - Set the orders supported by a file. * @mapping: The address space of the file. * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive). * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive). * * The filesystem should call this function in its inode constructor to * indicate which base size (min) and maximum size (max) of folio the VFS * can use to cache the contents of the file. This should only be used * if the filesystem needs special handling of folio sizes (ie there is * something the core cannot know). * Do not tune it based on, eg, i_size. * * Context: This should not be called while the inode is active as it * is non-atomic. 
*/ static inline void mapping_set_folio_order_range(struct address_space *mapping, unsigned int min, unsigned int max) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return; if (min > MAX_PAGECACHE_ORDER) min = MAX_PAGECACHE_ORDER; if (max > MAX_PAGECACHE_ORDER) max = MAX_PAGECACHE_ORDER; if (max < min) max = min; mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) | (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX); } static inline void mapping_set_folio_min_order(struct address_space *mapping, unsigned int min) { mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER); } /** * mapping_set_large_folios() - Indicate the file supports large folios. * @mapping: The address space of the file. * * The filesystem should call this function in its inode constructor to * indicate that the VFS can use large folios to cache the contents of * the file. * * Context: This should not be called while the inode is active as it * is non-atomic. */ static inline void mapping_set_large_folios(struct address_space *mapping) { mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER); } static inline unsigned int mapping_max_folio_order(const struct address_space *mapping) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 0; return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX; } static inline unsigned int mapping_min_folio_order(const struct address_space *mapping) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 0; return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN; } static inline unsigned long mapping_min_folio_nrpages(struct address_space *mapping) { return 1UL << mapping_min_folio_order(mapping); } /** * mapping_align_index() - Align index for this mapping. * @mapping: The address_space. * @index: The page index. * * The index of a folio must be naturally aligned. If you are adding a * new folio to the page cache and need to know what index to give it, * call this function. */ static inline pgoff_t mapping_align_index(struct address_space *mapping, pgoff_t index) { return round_down(index, mapping_min_folio_nrpages(mapping)); } /* * Large folio support currently depends on THP. These dependencies are * being worked on but are not yet fixed. */ static inline bool mapping_large_folio_support(struct address_space *mapping) { /* AS_FOLIO_ORDER is only reasonable for pagecache folios */ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON, "Anonymous mapping always supports large folio"); return mapping_max_folio_order(mapping) > 0; } /* Return the maximum folio size for this pagecache mapping, in bytes. 
*/ static inline size_t mapping_max_folio_size(const struct address_space *mapping) { return PAGE_SIZE << mapping_max_folio_order(mapping); } static inline int filemap_nr_thps(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS return atomic_read(&mapping->nr_thps); #else return 0; #endif } static inline void filemap_nr_thps_inc(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } static inline void filemap_nr_thps_dec(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); /** * folio_file_mapping - Find the mapping this folio belongs to. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Folios in the swap cache return the mapping of the * swap file or swap device where the data is stored. This is different * from the mapping returned by folio_mapping(). The only reason to * use it is if, like NFS, you return 0 from ->activate_swapfile. * * Do not call this for folios which aren't in the page cache or swap cache. */ static inline struct address_space *folio_file_mapping(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return swapcache_mapping(folio); return folio->mapping; } /** * folio_flush_mapping - Find the file mapping this folio belongs to. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Anonymous folios return NULL, even if they're in * the swap cache. Other kinds of folio also return NULL. * * This is ONLY used by architecture cache flushing code. If you aren't * writing cache flushing code, you want either folio_mapping() or * folio_file_mapping(). */ static inline struct address_space *folio_flush_mapping(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return NULL; return folio_mapping(folio); } static inline struct address_space *page_file_mapping(struct page *page) { return folio_file_mapping(page_folio(page)); } /** * folio_inode - Get the host inode for this folio. * @folio: The folio. * * For folios which are in the page cache, return the inode that this folio * belongs to. * * Do not call this for folios which aren't in the page cache. */ static inline struct inode *folio_inode(struct folio *folio) { return folio->mapping->host; } /** * folio_attach_private - Attach private data to a folio. * @folio: Folio to attach data to. * @data: Data to attach to folio. * * Attaching private data to a folio increments the page's reference count. * The data must be detached before the folio will be freed. */ static inline void folio_attach_private(struct folio *folio, void *data) { folio_get(folio); folio->private = data; folio_set_private(folio); } /** * folio_change_private - Change private data on a folio. * @folio: Folio to change the data on. * @data: Data to set on the folio. * * Change the private data attached to a folio and return the old * data. The page must previously have had data attached and the data * must be detached before the folio will be freed. * * Return: Data that was previously attached to the folio. 
*/ static inline void *folio_change_private(struct folio *folio, void *data) { void *old = folio_get_private(folio); folio->private = data; return old; } /** * folio_detach_private - Detach private data from a folio. * @folio: Folio to detach data from. * * Removes the data that was previously attached to the folio and decrements * the refcount on the page. * * Return: Data that was attached to the folio. */ static inline void *folio_detach_private(struct folio *folio) { void *data = folio_get_private(folio); if (!folio_test_private(folio)) return NULL; folio_clear_private(folio); folio->private = NULL; folio_put(folio); return data; } static inline void attach_page_private(struct page *page, void *data) { folio_attach_private(page_folio(page), data); } static inline void *detach_page_private(struct page *page) { return folio_detach_private(page_folio(page)); } #ifdef CONFIG_NUMA struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); #else static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { return folio_alloc_noprof(gfp, order); } #endif #define filemap_alloc_folio(...) \ alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__)) static inline struct page *__page_cache_alloc(gfp_t gfp) { return &filemap_alloc_folio(gfp, 0)->page; } static inline gfp_t readahead_gfp_mask(struct address_space *x) { return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(struct file *, struct folio *); pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); /** * typedef fgf_t - Flags for getting folios from the page cache. * * Most users of the page cache will not need to use these flags; * there are convenience functions such as filemap_get_folio() and * filemap_lock_folio(). For users which need more control over exactly * what is done with the folios, these flags to __filemap_get_folio() * are available. * * * %FGP_ACCESSED - The folio will be marked accessed. * * %FGP_LOCK - The folio is returned locked. * * %FGP_CREAT - If no folio is present then a new folio is allocated, * added to the page cache and the VM's LRU list. The folio is * returned locked. * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the * folio is already in cache. If the folio was allocated, unlock it * before returning so the caller can do the same dance. * * %FGP_WRITE - The folio will be written to by the caller. * * %FGP_NOFS - __GFP_FS will get cleared in gfp. * * %FGP_NOWAIT - Don't block on the folio lock. * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) * * %FGP_DONTCACHE - Uncached buffered IO * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin() * implementation. 
*/ typedef unsigned int __bitwise fgf_t; #define FGP_ACCESSED ((__force fgf_t)0x00000001) #define FGP_LOCK ((__force fgf_t)0x00000002) #define FGP_CREAT ((__force fgf_t)0x00000004) #define FGP_WRITE ((__force fgf_t)0x00000008) #define FGP_NOFS ((__force fgf_t)0x00000010) #define FGP_NOWAIT ((__force fgf_t)0x00000020) #define FGP_FOR_MMAP ((__force fgf_t)0x00000040) #define FGP_STABLE ((__force fgf_t)0x00000080) #define FGP_DONTCACHE ((__force fgf_t)0x00000100) #define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */ #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) static inline unsigned int filemap_get_order(size_t size) { unsigned int shift = ilog2(size); if (shift <= PAGE_SHIFT) return 0; return shift - PAGE_SHIFT; } /** * fgf_set_order - Encode a length in the fgf_t flags. * @size: The suggested size of the folio to create. * * The caller of __filemap_get_folio() can use this to suggest a preferred * size for the folio that is created. If there is already a folio at * the index, it will be returned, no matter what its size. If a folio * is freshly created, it may be of a different size than requested * due to alignment constraints, memory pressure, or the presence of * other folios at nearby indices. */ static inline fgf_t fgf_set_order(size_t size) { unsigned int order = filemap_get_order(size); if (!order) return 0; return (__force fgf_t)(order << 26); } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); /** * filemap_get_folio - Find and get a folio. * @mapping: The address_space to search. * @index: The page index. * * Looks up the page cache entry at @mapping & @index. If a folio is * present, it is returned with an increased refcount. * * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for * this index. Will not return a shadow, swap or DAX entry. */ static inline struct folio *filemap_get_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, 0, 0); } /** * filemap_lock_folio - Find and lock a folio. * @mapping: The address_space to search. * @index: The page index. * * Looks up the page cache entry at @mapping & @index. If a folio is * present, it is returned locked with an increased refcount. * * Context: May sleep. * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for * this index. Will not return a shadow, swap or DAX entry. */ static inline struct folio *filemap_lock_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, FGP_LOCK, 0); } /** * filemap_grab_folio - grab a folio from the page cache * @mapping: The address space to search * @index: The page index * * Looks up the page cache entry at @mapping & @index. If no folio is found, * a new folio is created. The folio is locked, marked as accessed, and * returned. * * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found * and failed to create a folio. 
*/ static inline struct folio *filemap_grab_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mapping_gfp_mask(mapping)); } /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned with an increased refcount. * * Otherwise, %NULL is returned. */ static inline struct page *find_get_page(struct address_space *mapping, pgoff_t offset) { return pagecache_get_page(mapping, offset, 0, 0); } static inline struct page *find_get_page_flags(struct address_space *mapping, pgoff_t offset, fgf_t fgp_flags) { return pagecache_get_page(mapping, offset, fgp_flags, 0); } /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search * @index: the page index * * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, it is returned locked and with an increased * refcount. * * Context: May sleep. * Return: A struct page or %NULL if there is no page in the cache for this * index. */ static inline struct page *find_lock_page(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK, 0); } /** * find_or_create_page - locate or add a pagecache page * @mapping: the page's address_space * @index: the page's index into the mapping * @gfp_mask: page allocation mode * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned locked and with an increased * refcount. * * If the page is not present, a new page is allocated using @gfp_mask * and added to the page cache and the VM's LRU list. The page is * returned locked and with an increased refcount. * * On memory exhaustion, %NULL is returned. * * find_or_create_page() may sleep, even if @gfp_flags specifies an * atomic allocation! */ static inline struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask); } /** * grab_cache_page_nowait - returns locked page at given index in given cache * @mapping: target address_space * @index: the page index * * Same as grab_cache_page(), but do not wait if the page is unavailable. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page. * * Clear __GFP_FS when allocating the page to avoid recursion into the fs * and deadlock against the caller's locked page. */ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, mapping_gfp_mask(mapping)); } extern pgoff_t __folio_swap_cache_index(struct folio *folio); /** * folio_index - File index of a folio. * @folio: The folio. * * For a folio which is either in the page cache or the swap cache, * return its index within the address_space it belongs to. If you know * the page is definitely in the page cache, you can look at the folio's * index directly. * * Return: The index (offset in units of pages) of a folio in its file. 
*/ static inline pgoff_t folio_index(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return __folio_swap_cache_index(folio); return folio->index; } /** * folio_next_index - Get the index of the next folio. * @folio: The current folio. * * Return: The index of the folio which follows this folio in the file. */ static inline pgoff_t folio_next_index(struct folio *folio) { return folio->index + folio_nr_pages(folio); } /** * folio_file_page - The page for a particular index. * @folio: The folio which contains this index. * @index: The index we want to look up. * * Sometimes after looking up a folio in the page cache, we need to * obtain the specific page for an index (eg a page fault). * * Return: The page containing the file data for this index. */ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) { return folio_page(folio, index & (folio_nr_pages(folio) - 1)); } /** * folio_contains - Does this folio contain this index? * @folio: The folio. * @index: The page index within the file. * * Context: The caller should have the page locked in order to prevent * (eg) shmem from moving the page between the page cache and swap cache * and changing its index in the middle of the operation. * Return: true or false. */ static inline bool folio_contains(struct folio *folio, pgoff_t index) { return index - folio_index(folio) < folio_nr_pages(folio); } /* * Given the page we found in the page cache, return the page corresponding * to this index in the file */ static inline struct page *find_subpage(struct page *head, pgoff_t index) { /* HugeTLBfs wants the head page regardless */ if (PageHuge(head)) return head; return head + (index & (thp_nr_pages(head) - 1)); } unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_contig(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch); struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index); /* * Returns locked page at given index in given cache, creating it if needed. */ static inline struct page *grab_cache_page(struct address_space *mapping, pgoff_t index) { return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); } struct folio *read_cache_folio(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index, gfp_t flags); struct page *read_cache_page(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, struct file *file) { return read_cache_page(mapping, index, NULL, file); } static inline struct folio *read_mapping_folio(struct address_space *mapping, pgoff_t index, struct file *file) { return read_cache_folio(mapping, index, NULL, file); } /** * page_pgoff - Calculate the logical page offset of this page. * @folio: The folio containing this page. * @page: The page which we need the offset of. * * For file pages, this is the offset from the beginning of the file * in units of PAGE_SIZE. For anonymous pages, this is the offset from * the beginning of the anon_vma in units of PAGE_SIZE. 
This will * return nonsense for KSM pages. * * Context: Caller must have a reference on the folio or otherwise * prevent it from being split or freed. * * Return: The offset in units of PAGE_SIZE. */ static inline pgoff_t page_pgoff(const struct folio *folio, const struct page *page) { return folio->index + folio_page_idx(folio, page); } /* * Return byte-offset into filesystem object for page. */ static inline loff_t page_offset(struct page *page) { return ((loff_t)page->index) << PAGE_SHIFT; } /** * folio_pos - Returns the byte position of this folio in its file. * @folio: The folio. */ static inline loff_t folio_pos(struct folio *folio) { return page_offset(&folio->page); } /* * Get the offset in PAGE_SIZE (even for hugetlb folios). */ static inline pgoff_t folio_pgoff(struct folio *folio) { return folio->index; } static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; } struct wait_page_key { struct folio *folio; int bit_nr; int page_match; }; struct wait_page_queue { struct folio *folio; int bit_nr; wait_queue_entry_t wait; }; static inline bool wake_page_match(struct wait_page_queue *wait_page, struct wait_page_key *key) { if (wait_page->folio != key->folio) return false; key->page_match = 1; if (wait_page->bit_nr != key->bit_nr) return false; return true; } void __folio_lock(struct folio *folio); int __folio_lock_killable(struct folio *folio); vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf); void unlock_page(struct page *page); void folio_unlock(struct folio *folio); /** * folio_trylock() - Attempt to lock a folio. * @folio: The folio to attempt to lock. * * Sometimes it is undesirable to wait for a folio to be unlocked (eg * when the locks are being taken in the wrong order, or if making * progress through a batch of folios is more important than processing * them in order). Usually folio_lock() is the correct function to call. * * Context: Any context. * Return: Whether the lock was successfully acquired. */ static inline bool folio_trylock(struct folio *folio) { return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0))); } /* * Return true if the page was successfully locked */ static inline bool trylock_page(struct page *page) { return folio_trylock(page_folio(page)); } /** * folio_lock() - Lock this folio. * @folio: The folio to lock. * * The folio lock protects against many things, probably more than it * should. It is primarily held while a folio is being brought uptodate, * either from its backing file or from swap. It is also held while a * folio is being truncated from its address_space, so holding the lock * is sufficient to keep folio->mapping stable. * * The folio lock is also held while write() is modifying the page to * provide POSIX atomicity guarantees (as long as the write does not * cross a page boundary). Other modifications to the data in the folio * do not hold the folio lock and can race with writes, eg DMA and stores * to mapped pages. * * Context: May sleep. If you need to acquire the locks of two or * more folios, they must be in order of ascending index, if they are * in the same address_space. If they are in different address_spaces, * acquire the lock of the folio which belongs to the address_space which * has the lowest address in memory first. 
*/ static inline void folio_lock(struct folio *folio) { might_sleep(); if (!folio_trylock(folio)) __folio_lock(folio); } /** * lock_page() - Lock the folio containing this page. * @page: The page to lock. * * See folio_lock() for a description of what the lock protects. * This is a legacy function and new code should probably use folio_lock() * instead. * * Context: May sleep. Pages in the same folio share a lock, so do not * attempt to lock two pages which share a folio. */ static inline void lock_page(struct page *page) { struct folio *folio; might_sleep(); folio = page_folio(page); if (!folio_trylock(folio)) __folio_lock(folio); } /** * folio_lock_killable() - Lock this folio, interruptible by a fatal signal. * @folio: The folio to lock. * * Attempts to lock the folio, like folio_lock(), except that the sleep * to acquire the lock is interruptible by a fatal signal. * * Context: May sleep; see folio_lock(). * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received. */ static inline int folio_lock_killable(struct folio *folio) { might_sleep(); if (!folio_trylock(folio)) return __folio_lock_killable(folio); return 0; } /* * folio_lock_or_retry - Lock the folio, unless this would block and the * caller indicated that it can handle a retry. * * Return value and mmap_lock implications depend on flags; see * __folio_lock_or_retry(). */ static inline vm_fault_t folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) { might_sleep(); if (!folio_trylock(folio)) return __folio_lock_or_retry(folio, vmf); return 0; } /* * This is exported only for folio_wait_locked/folio_wait_writeback, etc., * and should not be used directly. */ void folio_wait_bit(struct folio *folio, int bit_nr); int folio_wait_bit_killable(struct folio *folio, int bit_nr); /* * Wait for a folio to be unlocked. * * This must be called with the caller "holding" the folio, * ie with increased folio reference count so that the folio won't * go away during the wait. */ static inline void folio_wait_locked(struct folio *folio) { if (folio_test_locked(folio)) folio_wait_bit(folio, PG_locked); } static inline int folio_wait_locked_killable(struct folio *folio) { if (!folio_test_locked(folio)) return 0; return folio_wait_bit_killable(folio, PG_locked); } static inline void wait_on_page_locked(struct page *page) { folio_wait_locked(page_folio(page)); } void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); void end_page_writeback(struct page *page); void folio_end_writeback(struct folio *folio); void wait_for_stable_page(struct page *page); void folio_wait_stable(struct folio *folio); void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn); void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) { /* Avoid atomic ops, locking, etc. when not actually needed. 
*/ if (folio_test_dirty(folio)) __folio_cancel_dirty(folio); } bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_MIGRATION int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); #else #define filemap_migrate_folio NULL #endif void folio_end_private_2(struct folio *folio); void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); /* * Fault in userspace address range. */ size_t fault_in_writeable(char __user *uaddr, size_t size); size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); size_t fault_in_readable(const char __user *uaddr, size_t size); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp); int filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp); void filemap_remove_folio(struct folio *folio); void __filemap_remove_folio(struct folio *folio, void *shadow); void replace_page_cache_folio(struct folio *old, struct folio *new); void delete_from_page_cache_batch(struct address_space *mapping, struct folio_batch *fbatch); bool filemap_release_folio(struct folio *folio, gfp_t gfp); loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, int whence); /* Must be non-static for BPF error injection */ int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp); bool filemap_range_has_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte); /** * filemap_range_needs_writeback - check if range potentially needs writeback * @mapping: address space within which to check * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Find at least one page in the range supplied, usually used to check if * direct writing in this range will trigger a writeback. Used by O_DIRECT * read/write with IOCB_NOWAIT, to see if the caller needs to do * filemap_write_and_wait_range() before proceeding. * * Return: %true if the caller should do filemap_write_and_wait_range() before * doing O_DIRECT to a page in this range, %false otherwise. */ static inline bool filemap_range_needs_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { if (!mapping->nrpages) return false; if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) return false; return filemap_range_has_writeback(mapping, start_byte, end_byte); } /** * struct readahead_control - Describes a readahead request. * * A readahead request is for consecutive pages. Filesystems which * implement the ->readahead method should call readahead_page() or * readahead_page_batch() in a loop and attempt to start I/O against * each page in the request. * * Most of the fields in this struct are private and should be accessed * by the functions below. * * @file: The file, used primarily by network filesystems for authentication. * May be NULL if invoked internally by the filesystem. * @mapping: Readahead this filesystem object. * @ra: File readahead state. May be NULL. 
*/ struct readahead_control { struct file *file; struct address_space *mapping; struct file_ra_state *ra; /* private: use the readahead_* accessors instead */ pgoff_t _index; unsigned int _nr_pages; unsigned int _batch_count; bool dropbehind; bool _workingset; unsigned long _pflags; }; #define DEFINE_READAHEAD(ractl, f, r, m, i) \ struct readahead_control ractl = { \ .file = f, \ .mapping = m, \ .ra = r, \ ._index = i, \ } #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) void page_cache_ra_unbounded(struct readahead_control *, unsigned long nr_to_read, unsigned long lookahead_count); void page_cache_sync_ra(struct readahead_control *, unsigned long req_count); void page_cache_async_ra(struct readahead_control *, struct folio *, unsigned long req_count); void readahead_expand(struct readahead_control *ractl, loff_t new_start, size_t new_len); /** * page_cache_sync_readahead - generic file readahead * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. * @index: Index of first page to be read. * @req_count: Total number of pages being read by the caller. * * page_cache_sync_readahead() should be called when a cache miss happened: * it will submit the read. The readahead logic may decide to piggyback more * pages onto the read request if access patterns suggest it will improve * performance. */ static inline void page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, pgoff_t index, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, index); page_cache_sync_ra(&ractl, req_count); } /** * page_cache_async_readahead - file readahead for marked pages * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. * @folio: The folio which triggered the readahead call. * @req_count: Total number of pages being read by the caller. * * page_cache_async_readahead() should be called when a page is used which * is marked as PageReadahead; this is a marker to suggest that the application * has used up enough of the readahead window that we should start pulling in * more pages. */ static inline void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, struct folio *folio, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index); page_cache_async_ra(&ractl, folio, req_count); } static inline struct folio *__readahead_folio(struct readahead_control *ractl) { struct folio *folio; BUG_ON(ractl->_batch_count > ractl->_nr_pages); ractl->_nr_pages -= ractl->_batch_count; ractl->_index += ractl->_batch_count; if (!ractl->_nr_pages) { ractl->_batch_count = 0; return NULL; } folio = xa_load(&ractl->mapping->i_pages, ractl->_index); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); ractl->_batch_count = folio_nr_pages(folio); return folio; } /** * readahead_page - Get the next page to read. * @ractl: The current readahead request. * * Context: The page is locked and has an elevated refcount. The caller * should decreases the refcount once the page has been submitted for I/O * and unlock the page once all I/O to that page has completed. * Return: A pointer to the next page, or %NULL if we are done. 
*/ static inline struct page *readahead_page(struct readahead_control *ractl) { struct folio *folio = __readahead_folio(ractl); return &folio->page; } /** * readahead_folio - Get the next folio to read. * @ractl: The current readahead request. * * Context: The folio is locked. The caller should unlock the folio once * all I/O to that folio has completed. * Return: A pointer to the next folio, or %NULL if we are done. */ static inline struct folio *readahead_folio(struct readahead_control *ractl) { struct folio *folio = __readahead_folio(ractl); if (folio) folio_put(folio); return folio; } static inline unsigned int __readahead_batch(struct readahead_control *rac, struct page **array, unsigned int array_sz) { unsigned int i = 0; XA_STATE(xas, &rac->mapping->i_pages, 0); struct page *page; BUG_ON(rac->_batch_count > rac->_nr_pages); rac->_nr_pages -= rac->_batch_count; rac->_index += rac->_batch_count; rac->_batch_count = 0; xas_set(&xas, rac->_index); rcu_read_lock(); xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { if (xas_retry(&xas, page)) continue; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); array[i++] = page; rac->_batch_count += thp_nr_pages(page); if (i == array_sz) break; } rcu_read_unlock(); return i; } /** * readahead_page_batch - Get a batch of pages to read. * @rac: The current readahead request. * @array: An array of pointers to struct page. * * Context: The pages are locked and have an elevated refcount. The caller * should decreases the refcount once the page has been submitted for I/O * and unlock the page once all I/O to that page has completed. * Return: The number of pages placed in the array. 0 indicates the request * is complete. */ #define readahead_page_batch(rac, array) \ __readahead_batch(rac, array, ARRAY_SIZE(array)) /** * readahead_pos - The byte offset into the file of this readahead request. * @rac: The readahead request. */ static inline loff_t readahead_pos(struct readahead_control *rac) { return (loff_t)rac->_index * PAGE_SIZE; } /** * readahead_length - The number of bytes in this readahead request. * @rac: The readahead request. */ static inline size_t readahead_length(struct readahead_control *rac) { return rac->_nr_pages * PAGE_SIZE; } /** * readahead_index - The index of the first page in this readahead request. * @rac: The readahead request. */ static inline pgoff_t readahead_index(struct readahead_control *rac) { return rac->_index; } /** * readahead_count - The number of pages in this readahead request. * @rac: The readahead request. */ static inline unsigned int readahead_count(struct readahead_control *rac) { return rac->_nr_pages; } /** * readahead_batch_length - The number of bytes in the current batch. * @rac: The readahead request. */ static inline size_t readahead_batch_length(struct readahead_control *rac) { return rac->_batch_count * PAGE_SIZE; } static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; } /** * folio_mkwrite_check_truncate - check if folio was truncated * @folio: the folio to check * @inode: the inode to check the folio against * * Return: the number of bytes in the folio up to EOF, * or -EFAULT if the folio was truncated. 
*/ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio, struct inode *inode) { loff_t size = i_size_read(inode); pgoff_t index = size >> PAGE_SHIFT; size_t offset = offset_in_folio(folio, size); if (!folio->mapping) return -EFAULT; /* folio is wholly inside EOF */ if (folio_next_index(folio) - 1 < index) return folio_size(folio); /* folio is wholly past EOF */ if (folio->index > index || !offset) return -EFAULT; /* folio is partially inside EOF */ return offset; } /** * page_mkwrite_check_truncate - check if page was truncated * @page: the page to check * @inode: the inode to check the page against * * Returns the number of bytes in the page up to EOF, * or -EFAULT if the page was truncated. */ static inline int page_mkwrite_check_truncate(struct page *page, struct inode *inode) { loff_t size = i_size_read(inode); pgoff_t index = size >> PAGE_SHIFT; int offset = offset_in_page(size); if (page->mapping != inode->i_mapping) return -EFAULT; /* page is wholly inside EOF */ if (page->index < index) return PAGE_SIZE; /* page is wholly past EOF */ if (page->index > index || !offset) return -EFAULT; /* page is partially inside EOF */ return offset; } /** * i_blocks_per_folio - How many blocks fit in this folio. * @inode: The inode which contains the blocks. * @folio: The folio. * * If the block size is larger than the size of this folio, return zero. * * Context: The caller should hold a refcount on the folio to prevent it * from being split. * Return: The number of filesystem blocks covered by this folio. */ static inline unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio) { return folio_size(folio) >> inode->i_blkbits; } #endif /* _LINUX_PAGEMAP_H */
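/*
 * Illustrative sketch, not part of the original header: the usual way a
 * filesystem records an asynchronous writeback failure so that a later
 * fsync(2) can report it, using mapping_set_error() and
 * folio_end_writeback() declared above.  "myfs_end_folio_writeback" is a
 * hypothetical completion helper, not an existing kernel function.
 */
static void myfs_end_folio_writeback(struct folio *folio, int error)
{
	if (unlikely(error))
		mapping_set_error(folio->mapping, error);
	folio_end_writeback(folio);
}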
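/*
 * Illustrative sketch, not part of the original header: a write_begin()
 * style lookup that returns a locked folio, creating it if absent, and
 * hints a larger folio size with fgf_set_order().  "myfs_get_write_folio"
 * is a hypothetical helper; the result may be an ERR_PTR() and must be
 * checked with IS_ERR() by the caller.
 */
static struct folio *myfs_get_write_folio(struct address_space *mapping,
					   loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);

	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
				   mapping_gfp_mask(mapping));
}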
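/*
 * Illustrative sketch, not part of the original header: the canonical
 * shape of an address_space_operations ->readahead() method, draining the
 * request with readahead_folio().  "myfs_readahead" is hypothetical; a
 * real implementation would start asynchronous reads and unlock each
 * folio from its I/O completion handler rather than marking it uptodate
 * synchronously as done here for brevity.  folio_mark_uptodate() comes
 * from linux/page-flags.h.
 */
static void myfs_readahead(struct readahead_control *rac)
{
	struct folio *folio;

	while ((folio = readahead_folio(rac)) != NULL) {
		/* readahead_folio() returns the folio locked. */
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
}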
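/*
 * Illustrative sketch, not part of the original header: how a
 * vm_operations_struct ->page_mkwrite() handler typically pairs
 * folio_lock() with folio_mkwrite_check_truncate() to back out if the
 * folio was truncated while the fault was in flight.  "myfs_page_mkwrite"
 * is hypothetical and error handling is reduced to the truncate race;
 * page_folio() and folio_mark_dirty() come from linux/mm.h, file_inode()
 * from linux/fs.h.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	folio_lock(folio);
	if (folio_mkwrite_check_truncate(folio, inode) < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}
	folio_mark_dirty(folio);
	/* Returning VM_FAULT_LOCKED hands the still-locked folio back. */
	return VM_FAULT_LOCKED;
}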
/* SPDX-License-Identifier: GPL-2.0 */ /* * Released under the GPLv2 only. */ #include <linux/pm.h> #include <linux/acpi.h> struct usb_hub_descriptor; struct usb_dev_state; /* Functions local to drivers/usb/core/ */ extern int usb_create_sysfs_dev_files(struct usb_device *dev); extern void usb_remove_sysfs_dev_files(struct usb_device *dev); extern void usb_create_sysfs_intf_files(struct usb_interface *intf); extern void usb_remove_sysfs_intf_files(struct usb_interface *intf); extern int usb_update_wireless_status_attr(struct usb_interface *intf); extern int usb_create_ep_devs(struct device *parent, struct usb_host_endpoint *endpoint, struct usb_device *udev); extern void usb_remove_ep_devs(struct usb_host_endpoint *endpoint); extern void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, bool reset_toggle); extern void usb_enable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_toggles); extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, bool reset_hardware); extern void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_hardware); extern void usb_release_interface_cache(struct kref *ref); extern void usb_disable_device(struct usb_device *dev, int skip_ep0); extern int usb_deauthorize_device(struct usb_device *); extern int usb_authorize_device(struct usb_device *); extern void usb_deauthorize_interface(struct usb_interface *); extern void usb_authorize_interface(struct usb_interface *); extern void usb_detect_quirks(struct usb_device *udev); extern void usb_detect_interface_quirks(struct usb_device *udev); extern void usb_release_quirk_list(void); extern bool usb_endpoint_is_ignored(struct usb_device *udev, struct usb_host_interface *intf, struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); extern struct usb_device_descriptor *usb_get_device_descriptor( struct usb_device *udev); extern int usb_set_isoch_delay(struct usb_device *dev); extern int usb_get_bos_descriptor(struct usb_device *dev); extern void usb_release_bos_descriptor(struct usb_device *dev); extern int usb_set_configuration(struct usb_device *dev, int configuration); extern int usb_choose_configuration(struct usb_device *udev); extern int usb_generic_driver_probe(struct usb_device *udev); extern void usb_generic_driver_disconnect(struct usb_device *udev); extern int usb_generic_driver_suspend(struct usb_device *udev, pm_message_t msg); extern int usb_generic_driver_resume(struct usb_device *udev, pm_message_t msg); static inline unsigned usb_get_max_power(struct usb_device *udev, struct usb_host_config *c) { /* SuperSpeed power is in 8 mA units; others are in 2 mA units */ unsigned mul = (udev->speed >=
USB_SPEED_SUPER ? 8 : 2); return c->desc.bMaxPower * mul; } extern void usb_kick_hub_wq(struct usb_device *dev); extern int usb_match_one_id_intf(struct usb_device *dev, struct usb_host_interface *intf, const struct usb_device_id *id); extern int usb_match_device(struct usb_device *dev, const struct usb_device_id *id); extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev, const struct usb_device_id *id); extern bool usb_driver_applicable(struct usb_device *udev, const struct usb_device_driver *udrv); extern void usb_forced_unbind_intf(struct usb_interface *intf); extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev); extern void usb_hub_release_all_ports(struct usb_device *hdev, struct usb_dev_state *owner); extern bool usb_device_is_owned(struct usb_device *udev); extern int usb_hub_init(void); extern void usb_hub_cleanup(void); extern int usb_major_init(void); extern void usb_major_cleanup(void); extern int usb_device_supports_lpm(struct usb_device *udev); extern int usb_port_disable(struct usb_device *udev); #ifdef CONFIG_PM extern int usb_suspend(struct device *dev, pm_message_t msg); extern int usb_resume(struct device *dev, pm_message_t msg); extern int usb_resume_complete(struct device *dev); extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg); extern int usb_port_resume(struct usb_device *dev, pm_message_t msg); extern void usb_autosuspend_device(struct usb_device *udev); extern int usb_autoresume_device(struct usb_device *udev); extern int usb_remote_wakeup(struct usb_device *dev); extern int usb_runtime_suspend(struct device *dev); extern int usb_runtime_resume(struct device *dev); extern int usb_runtime_idle(struct device *dev); extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev); extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev); extern void usbfs_notify_suspend(struct usb_device *udev); extern void usbfs_notify_resume(struct usb_device *udev); #else static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg) { return 0; } static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg) { return 0; } #define usb_autosuspend_device(udev) do {} while (0) static inline int usb_autoresume_device(struct usb_device *udev) { return 0; } static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev) { return 0; } static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev) { return 0; } #endif extern const struct class usbmisc_class; extern const struct bus_type usb_bus_type; extern struct mutex usb_port_peer_mutex; extern const struct device_type usb_device_type; extern const struct device_type usb_if_device_type; extern const struct device_type usb_ep_device_type; extern const struct device_type usb_port_device_type; extern struct usb_device_driver usb_generic_driver; static inline int is_usb_device(const struct device *dev) { return dev->type == &usb_device_type; } static inline int is_usb_interface(const struct device *dev) { return dev->type == &usb_if_device_type; } static inline int is_usb_endpoint(const struct device *dev) { return dev->type == &usb_ep_device_type; } static inline int is_usb_port(const struct device *dev) { return dev->type == &usb_port_device_type; } static inline int is_root_hub(struct usb_device *udev) { return (udev->parent == NULL); } extern bool is_usb_device_driver(const struct device_driver *drv); /* for labeling diagnostics */ extern const char *usbcore_name; /* sysfs stuff */ extern const struct 
attribute_group *usb_device_groups[]; extern const struct attribute_group *usb_interface_groups[]; /* usbfs stuff */ extern struct usb_driver usbfs_driver; extern const struct file_operations usbfs_devices_fops; extern const struct file_operations usbdev_file_operations; extern int usb_devio_init(void); extern void usb_devio_cleanup(void); /* * Firmware specific cookie identifying a port's location. '0' == no location * data available */ typedef u32 usb_port_location_t; /* internal notify stuff */ extern void usb_notify_add_device(struct usb_device *udev); extern void usb_notify_remove_device(struct usb_device *udev); extern void usb_notify_add_bus(struct usb_bus *ubus); extern void usb_notify_remove_bus(struct usb_bus *ubus); extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev, struct usb_hub_descriptor *desc); #ifdef CONFIG_ACPI extern int usb_acpi_register(void); extern void usb_acpi_unregister(void); extern acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, int port1); #else static inline int usb_acpi_register(void) { return 0; }; static inline void usb_acpi_unregister(void) { }; #endif
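/*
 * Illustrative sketch, not part of the original header: reporting the
 * maximum bus power draw of a device's active configuration in mA via
 * usb_get_max_power() defined above.  "myusb_report_power" is a
 * hypothetical debugging helper, not an existing core function; the
 * usb_device and usb_host_config types come from linux/usb.h.
 */
static inline void myusb_report_power(struct usb_device *udev)
{
	struct usb_host_config *config = udev->actconfig;

	if (config)
		dev_dbg(&udev->dev, "config %u may draw up to %u mA\n",
			config->desc.bConfigurationValue,
			usb_get_max_power(udev, config));
}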
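/*
 * Illustrative sketch preceding kernel/kallsyms.c below, not part of the
 * file itself: the consumer side of the compressed symbol tables that the
 * file decodes.  sprint_symbol() fills a caller-supplied buffer of at
 * least KSYM_SYMBOL_LEN bytes with "name+offset/size [module]"; printk's
 * %pS specifier prints the same without an explicit buffer.
 * "report_caller" is a hypothetical helper.
 */
#include <linux/kallsyms.h>
#include <linux/printk.h>

static void report_caller(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	sprint_symbol(sym, addr);
	pr_info("called from %s (also printable as %pS)\n", sym, (void *)addr);
}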
// SPDX-License-Identifier: GPL-2.0-only /* * kallsyms.c: in-kernel printing of symbolic oopses and stack traces. * * Rewritten and vastly simplified by Rusty Russell for in-kernel * module loader: * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * * ChangeLog: * * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com> * Changed the compression method from stem compression to "table lookup" * compression (see scripts/kallsyms.c for a more complete description) */ #include <linux/kallsyms.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/kdb.h> #include <linux/err.h> #include <linux/proc_fs.h> #include <linux/sched.h> /* for cond_resched */ #include <linux/ctype.h> #include <linux/slab.h> #include <linux/filter.h> #include <linux/ftrace.h> #include <linux/kprobes.h> #include <linux/build_bug.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bsearch.h> #include <linux/btf_ids.h> #include "kallsyms_internal.h" /* * Expand compressed symbol data into the resulting uncompressed string, * given the offset to where the symbol is in the compressed stream. * If the uncompressed string is too long (>= maxlen), it will be truncated. */ static unsigned int kallsyms_expand_symbol(unsigned int off, char *result, size_t maxlen) { int len, skipped_first = 0; const char *tptr; const u8 *data; /* Get the compressed symbol length from the first symbol byte. */ data = &kallsyms_names[off]; len = *data; data++; off++; /* If MSB is 1, it is a "big" symbol, so it needs an additional byte. */ if ((len & 0x80) != 0) { len = (len & 0x7F) | (*data << 7); data++; off++; } /* * Update the offset to return the offset for the next symbol on * the compressed stream. */ off += len; /* * For every byte on the compressed symbol data, copy the table * entry for that byte. */ while (len) { tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; data++; len--; while (*tptr) { if (skipped_first) { if (maxlen <= 1) goto tail; *result = *tptr; result++; maxlen--; } else skipped_first = 1; tptr++; } } tail: if (maxlen) *result = '\0'; /* Return the offset to the next symbol. */ return off; } /* * Get symbol type information. This is encoded as a single char at the * beginning of the symbol name. */ static char kallsyms_get_symbol_type(unsigned int off) { /* * Get just the first code, look it up in the token table, * and return the first char from this token. */ return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]]; } /* * Find the offset on the compressed stream given an index in the * kallsyms array. */ static unsigned int get_symbol_offset(unsigned long pos) { const u8 *name; int i, len; /* * Use the closest marker we have. We have markers every 256 positions, * so that should be close enough. */ name = &kallsyms_names[kallsyms_markers[pos >> 8]]; /* * Sequentially scan all the symbols up to the point we're searching * for. Every symbol is stored in a [<len>][<len> bytes of data] format, * so we just need to add the len to the current pointer for every * symbol we wish to skip. */ for (i = 0; i < (pos & 0xFF); i++) { len = *name; /* * If MSB is 1, it is a "big" symbol, so we need to look into * the next byte (and skip it, too).
*/ if ((len & 0x80) != 0) len = ((len & 0x7F) | (name[1] << 7)) + 1; name = name + len + 1; } return name - kallsyms_names; } unsigned long kallsyms_sym_address(int idx) { /* values are unsigned offsets if --absolute-percpu is not in effect */ if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU)) return kallsyms_relative_base + (u32)kallsyms_offsets[idx]; /* ...otherwise, positive offsets are absolute values */ if (kallsyms_offsets[idx] >= 0) return kallsyms_offsets[idx]; /* ...and negative offsets are relative to kallsyms_relative_base - 1 */ return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; } static unsigned int get_symbol_seq(int index) { unsigned int i, seq = 0; for (i = 0; i < 3; i++) seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i]; return seq; } static int kallsyms_lookup_names(const char *name, unsigned int *start, unsigned int *end) { int ret; int low, mid, high; unsigned int seq, off; char namebuf[KSYM_NAME_LEN]; low = 0; high = kallsyms_num_syms - 1; while (low <= high) { mid = low + (high - low) / 2; seq = get_symbol_seq(mid); off = get_symbol_offset(seq); kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); ret = strcmp(name, namebuf); if (ret > 0) low = mid + 1; else if (ret < 0) high = mid - 1; else break; } if (low > high) return -ESRCH; low = mid; while (low) { seq = get_symbol_seq(low - 1); off = get_symbol_offset(seq); kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); if (strcmp(name, namebuf)) break; low--; } *start = low; if (end) { high = mid; while (high < kallsyms_num_syms - 1) { seq = get_symbol_seq(high + 1); off = get_symbol_offset(seq); kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); if (strcmp(name, namebuf)) break; high++; } *end = high; } return 0; } /* Lookup the address for this symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name) { int ret; unsigned int i; /* Skip the search for empty string. */ if (!*name) return 0; ret = kallsyms_lookup_names(name, &i, NULL); if (!ret) return kallsyms_sym_address(get_symbol_seq(i)); return module_kallsyms_lookup_name(name); } /* * Iterate over all symbols in vmlinux. For symbols from modules use * module_kallsyms_on_each_symbol instead. */ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long), void *data) { char namebuf[KSYM_NAME_LEN]; unsigned long i; unsigned int off; int ret; for (i = 0, off = 0; i < kallsyms_num_syms; i++) { off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); ret = fn(data, namebuf, kallsyms_sym_address(i)); if (ret != 0) return ret; cond_resched(); } return 0; } int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), const char *name, void *data) { int ret; unsigned int i, start, end; ret = kallsyms_lookup_names(name, &start, &end); if (ret) return 0; for (i = start; !ret && i <= end; i++) { ret = fn(data, kallsyms_sym_address(get_symbol_seq(i))); cond_resched(); } return ret; } static unsigned long get_symbol_pos(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { unsigned long symbol_start = 0, symbol_end = 0; unsigned long i, low, high, mid; /* Do a binary search on the sorted kallsyms_offsets array. */ low = 0; high = kallsyms_num_syms; while (high - low > 1) { mid = low + (high - low) / 2; if (kallsyms_sym_address(mid) <= addr) low = mid; else high = mid; } /* * Search for the first aliased symbol. Aliased * symbols are symbols with the same address. 
*/ while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low)) --low; symbol_start = kallsyms_sym_address(low); /* Search for next non-aliased symbol. */ for (i = low + 1; i < kallsyms_num_syms; i++) { if (kallsyms_sym_address(i) > symbol_start) { symbol_end = kallsyms_sym_address(i); break; } } /* If we found no next symbol, we use the end of the section. */ if (!symbol_end) { if (is_kernel_inittext(addr)) symbol_end = (unsigned long)_einittext; else if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) symbol_end = (unsigned long)_end; else symbol_end = (unsigned long)_etext; } if (symbolsize) *symbolsize = symbol_end - symbol_start; if (offset) *offset = addr - symbol_start; return low; } /* * Lookup an address but don't bother to find any names. */ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { char namebuf[KSYM_NAME_LEN]; if (is_ksym_addr(addr)) { get_symbol_pos(addr, symbolsize, offset); return 1; } return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) || !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); } static int kallsyms_lookup_buildid(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, const unsigned char **modbuildid, char *namebuf) { int ret; namebuf[KSYM_NAME_LEN - 1] = 0; namebuf[0] = 0; if (is_ksym_addr(addr)) { unsigned long pos; pos = get_symbol_pos(addr, symbolsize, offset); /* Grab name */ kallsyms_expand_symbol(get_symbol_offset(pos), namebuf, KSYM_NAME_LEN); if (modname) *modname = NULL; if (modbuildid) *modbuildid = NULL; return strlen(namebuf); } /* See if it's in a module or a BPF JITed image. */ ret = module_address_lookup(addr, symbolsize, offset, modname, modbuildid, namebuf); if (!ret) ret = bpf_address_lookup(addr, symbolsize, offset, modname, namebuf); if (!ret) ret = ftrace_mod_address_lookup(addr, symbolsize, offset, modname, namebuf); return ret; } /* * Lookup an address * - modname is set to NULL if it's in the kernel. * - We guarantee that the returned name is valid until we reschedule even if. * It resides in a module. * - We also guarantee that modname will be valid until rescheduled. */ const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf) { int ret = kallsyms_lookup_buildid(addr, symbolsize, offset, modname, NULL, namebuf); if (!ret) return NULL; return namebuf; } int lookup_symbol_name(unsigned long addr, char *symname) { symname[0] = '\0'; symname[KSYM_NAME_LEN - 1] = '\0'; if (is_ksym_addr(addr)) { unsigned long pos; pos = get_symbol_pos(addr, NULL, NULL); /* Grab name */ kallsyms_expand_symbol(get_symbol_offset(pos), symname, KSYM_NAME_LEN); return 0; } /* See if it's in a module. */ return lookup_module_symbol_name(addr, symname); } /* Look up a kernel symbol and return it in a text buffer. 
*/ static int __sprint_symbol(char *buffer, unsigned long address, int symbol_offset, int add_offset, int add_buildid) { char *modname; const unsigned char *buildid; unsigned long offset, size; int len; address += symbol_offset; len = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid, buffer); if (!len) return sprintf(buffer, "0x%lx", address - symbol_offset); offset -= symbol_offset; if (add_offset) len += sprintf(buffer + len, "+%#lx/%#lx", offset, size); if (modname) { len += sprintf(buffer + len, " [%s", modname); #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) if (add_buildid && buildid) { /* build ID should match length of sprintf */ #if IS_ENABLED(CONFIG_MODULES) static_assert(sizeof(typeof_member(struct module, build_id)) == 20); #endif len += sprintf(buffer + len, " %20phN", buildid); } #endif len += sprintf(buffer + len, "]"); } return len; } /** * sprint_symbol - Look up a kernel symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function looks up a kernel symbol with @address and stores its name, * offset, size and module name to @buffer if possible. If no symbol was found, * just saves its @address as is. * * This function returns the number of bytes stored in @buffer. */ int sprint_symbol(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, 0, 1, 0); } EXPORT_SYMBOL_GPL(sprint_symbol); /** * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function looks up a kernel symbol with @address and stores its name, * offset, size, module name and module build ID to @buffer if possible. If no * symbol was found, just saves its @address as is. * * This function returns the number of bytes stored in @buffer. */ int sprint_symbol_build_id(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, 0, 1, 1); } EXPORT_SYMBOL_GPL(sprint_symbol_build_id); /** * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function looks up a kernel symbol with @address and stores its name * and module name to @buffer if possible. If no symbol was found, just saves * its @address as is. * * This function returns the number of bytes stored in @buffer. */ int sprint_symbol_no_offset(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, 0, 0, 0); } EXPORT_SYMBOL_GPL(sprint_symbol_no_offset); /** * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function is for stack backtrace and does the same thing as * sprint_symbol() but with modified/decreased @address. If there is a * tail-call to the function marked "noreturn", gcc optimized out code after * the call so that the stack-saved return address could point outside of the * caller. This function ensures that kallsyms will find the original caller * by decreasing @address. * * This function returns the number of bytes stored in @buffer. */ int sprint_backtrace(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, -1, 1, 0); } /** * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function is for stack backtrace and does the same thing as * sprint_symbol() but with modified/decreased @address. 
If there is a * tail-call to the function marked "noreturn", gcc optimized out code after * the call so that the stack-saved return address could point outside of the * caller. This function ensures that kallsyms will find the original caller * by decreasing @address. This function also appends the module build ID to * the @buffer if @address is within a kernel module. * * This function returns the number of bytes stored in @buffer. */ int sprint_backtrace_build_id(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, -1, 1, 1); } /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ struct kallsym_iter { loff_t pos; loff_t pos_mod_end; loff_t pos_ftrace_mod_end; loff_t pos_bpf_end; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols. */ char type; char name[KSYM_NAME_LEN]; char module_name[MODULE_NAME_LEN]; int exported; int show_value; }; static int get_ksymbol_mod(struct kallsym_iter *iter) { int ret = module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, &iter->type, iter->name, iter->module_name, &iter->exported); if (ret < 0) { iter->pos_mod_end = iter->pos; return 0; } return 1; } /* * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace * purposes. In that case "__builtin__ftrace" is used as a module name, even * though "__builtin__ftrace" is not a module. */ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) { int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end, &iter->value, &iter->type, iter->name, iter->module_name, &iter->exported); if (ret < 0) { iter->pos_ftrace_mod_end = iter->pos; return 0; } return 1; } static int get_ksymbol_bpf(struct kallsym_iter *iter) { int ret; strscpy(iter->module_name, "bpf", MODULE_NAME_LEN); iter->exported = 0; ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, &iter->value, &iter->type, iter->name); if (ret < 0) { iter->pos_bpf_end = iter->pos; return 0; } return 1; } /* * This uses "__builtin__kprobes" as a module name for symbols for pages * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a * module. */ static int get_ksymbol_kprobe(struct kallsym_iter *iter) { strscpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN); iter->exported = 0; return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end, &iter->value, &iter->type, iter->name) < 0 ? 0 : 1; } /* Returns space to next name. */ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) { unsigned off = iter->nameoff; iter->module_name[0] = '\0'; iter->value = kallsyms_sym_address(iter->pos); iter->type = kallsyms_get_symbol_type(off); off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name)); return off - iter->nameoff; } static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) { iter->name[0] = '\0'; iter->nameoff = get_symbol_offset(new_pos); iter->pos = new_pos; if (new_pos == 0) { iter->pos_mod_end = 0; iter->pos_ftrace_mod_end = 0; iter->pos_bpf_end = 0; } } /* * The end position (last + 1) of each additional kallsyms section is recorded * in iter->pos_..._end as each section is added, and so can be used to * determine which get_ksymbol_...() function to call next. 
*/ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) { iter->pos = pos; if ((!iter->pos_mod_end || iter->pos_mod_end > pos) && get_ksymbol_mod(iter)) return 1; if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) && get_ksymbol_ftrace_mod(iter)) return 1; if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) && get_ksymbol_bpf(iter)) return 1; return get_ksymbol_kprobe(iter); } /* Returns false if pos at or past end of file. */ static int update_iter(struct kallsym_iter *iter, loff_t pos) { /* Module symbols can be accessed randomly. */ if (pos >= kallsyms_num_syms) return update_iter_mod(iter, pos); /* If we're not on the desired position, reset to new position. */ if (pos != iter->pos) reset_iter(iter, pos); iter->nameoff += get_ksymbol_core(iter); iter->pos++; return 1; } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { (*pos)++; if (!update_iter(m->private, *pos)) return NULL; return p; } static void *s_start(struct seq_file *m, loff_t *pos) { if (!update_iter(m->private, *pos)) return NULL; return m->private; } static void s_stop(struct seq_file *m, void *p) { } static int s_show(struct seq_file *m, void *p) { void *value; struct kallsym_iter *iter = m->private; /* Some debugging symbols have no name. Ignore them. */ if (!iter->name[0]) return 0; value = iter->show_value ? (void *)iter->value : NULL; if (iter->module_name[0]) { char type; /* * Label it "global" if it is exported, * "local" if not exported. */ type = iter->exported ? toupper(iter->type) : tolower(iter->type); seq_printf(m, "%px %c %s\t[%s]\n", value, type, iter->name, iter->module_name); } else seq_printf(m, "%px %c %s\n", value, iter->type, iter->name); return 0; } static const struct seq_operations kallsyms_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show }; #ifdef CONFIG_BPF_SYSCALL struct bpf_iter__ksym { __bpf_md_ptr(struct bpf_iter_meta *, meta); __bpf_md_ptr(struct kallsym_iter *, ksym); }; static int ksym_prog_seq_show(struct seq_file *m, bool in_stop) { struct bpf_iter__ksym ctx; struct bpf_iter_meta meta; struct bpf_prog *prog; meta.seq = m; prog = bpf_iter_get_info(&meta, in_stop); if (!prog) return 0; ctx.meta = &meta; ctx.ksym = m ? m->private : NULL; return bpf_iter_run_prog(prog, &ctx); } static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p) { return ksym_prog_seq_show(m, false); } static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p) { if (!p) (void) ksym_prog_seq_show(m, true); else s_stop(m, p); } static const struct seq_operations bpf_iter_ksym_ops = { .start = s_start, .next = s_next, .stop = bpf_iter_ksym_seq_stop, .show = bpf_iter_ksym_seq_show, }; static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux) { struct kallsym_iter *iter = priv_data; reset_iter(iter, 0); /* cache here as in kallsyms_open() case; use current process * credentials to tell BPF iterators if values should be shown. 
*/ iter->show_value = kallsyms_show_value(current_cred()); return 0; } DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym) static const struct bpf_iter_seq_info ksym_iter_seq_info = { .seq_ops = &bpf_iter_ksym_ops, .init_seq_private = bpf_iter_ksym_init, .fini_seq_private = NULL, .seq_priv_size = sizeof(struct kallsym_iter), }; static struct bpf_iter_reg ksym_iter_reg_info = { .target = "ksym", .feature = BPF_ITER_RESCHED, .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__ksym, ksym), PTR_TO_BTF_ID_OR_NULL }, }, .seq_info = &ksym_iter_seq_info, }; BTF_ID_LIST(btf_ksym_iter_id) BTF_ID(struct, kallsym_iter) static int __init bpf_ksym_iter_register(void) { ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id; return bpf_iter_reg_target(&ksym_iter_reg_info); } late_initcall(bpf_ksym_iter_register); #endif /* CONFIG_BPF_SYSCALL */ static int kallsyms_open(struct inode *inode, struct file *file) { /* * We keep iterator in m->private, since normal case is to * s_start from where we left off, so we avoid doing * using get_symbol_offset for every symbol. */ struct kallsym_iter *iter; iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter)); if (!iter) return -ENOMEM; reset_iter(iter, 0); /* * Instead of checking this on every s_show() call, cache * the result here at open time. */ iter->show_value = kallsyms_show_value(file->f_cred); return 0; } #ifdef CONFIG_KGDB_KDB const char *kdb_walk_kallsyms(loff_t *pos) { static struct kallsym_iter kdb_walk_kallsyms_iter; if (*pos == 0) { memset(&kdb_walk_kallsyms_iter, 0, sizeof(kdb_walk_kallsyms_iter)); reset_iter(&kdb_walk_kallsyms_iter, 0); } while (1) { if (!update_iter(&kdb_walk_kallsyms_iter, *pos)) return NULL; ++*pos; /* Some debugging symbols have no name. Ignore them. */ if (kdb_walk_kallsyms_iter.name[0]) return kdb_walk_kallsyms_iter.name; } } #endif /* CONFIG_KGDB_KDB */ static const struct proc_ops kallsyms_proc_ops = { .proc_open = kallsyms_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release_private, }; static int __init kallsyms_init(void) { proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops); return 0; } device_initcall(kallsyms_init);
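The kallsyms code above is what ultimately backs /proc/kallsyms: s_show() emits one "%px %c %s" line per symbol, with an optional "\t[module]" suffix, and kallsyms_show_value() decides whether real addresses or zeros are exposed. As a minimal illustration (userspace code, not part of the kernel source above), the sketch below parses that output to resolve a symbol name to an address; the default symbol "jiffies" is only an example.

/*
 * Illustrative userspace example: resolve a symbol via /proc/kallsyms.
 * Each line follows the format printed by s_show() above:
 *   "<address> <type> <name>" optionally followed by "\t[module]".
 * Addresses read back as zeros unless the opener passes the
 * kallsyms_show_value() check (sufficient privilege / kptr_restrict).
 */
#include <stdio.h>
#include <string.h>

static unsigned long long lookup_ksym(const char *wanted)
{
	char line[1024], name[512], type;
	unsigned long long addr;
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 0;

	while (fgets(line, sizeof(line), f)) {
		/* The name stops at whitespace, so a "\t[module]" tail is ignored. */
		if (sscanf(line, "%llx %c %511s", &addr, &type, name) != 3)
			continue;
		if (!strcmp(name, wanted)) {
			fclose(f);
			return addr;
		}
	}
	fclose(f);
	return 0;
}

int main(int argc, char **argv)
{
	const char *sym = argc > 1 ? argv[1] : "jiffies";

	printf("%s = 0x%llx\n", sym, lookup_ksym(sym));
	return 0;
}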
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Corsair Void headsets * * Copyright (C) 2023-2024 Stuart Hayhurst */ /* -------------------------------------------------------------------------- */ /* Receiver report information: (ID 100) */ /*
-------------------------------------------------------------------------- */ /* * When queried, the receiver responds with 5 bytes to describe the battery * The power button, mute button and moving the mic also trigger this report * This includes power button + mic + connection + battery status and capacity * The information below may not be perfect, it's been gathered through guesses * * 0: REPORT ID * 100 for the battery packet * * 1: POWER BUTTON + (?) * Largest bit is 1 when power button pressed * * 2: BATTERY CAPACITY + MIC STATUS * Battery capacity: * Seems to report ~54 higher than reality when charging * Capped at 100, charging or not * Microphone status: * Largest bit is set to 1 when the mic is physically up * No bits change when the mic is muted, only when physically moved * This report is sent every time the mic is moved, no polling required * * 3: CONNECTION STATUS * 16: Wired headset * 38: Initialising * 49: Lost connection * 51: Disconnected, searching * 52: Disconnected, not searching * 177: Normal * * 4: BATTERY STATUS * 0: Disconnected * 1: Normal * 2: Low * 3: Critical - sent during shutdown * 4: Fully charged * 5: Charging */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* Receiver report information: (ID 102) */ /* -------------------------------------------------------------------------- */ /* * When queried, the receiver responds with 4 bytes to describe the firmware * The first 2 bytes are for the receiver, the second 2 are the headset * The headset firmware version will be 0 if no headset is connected * * 0: Receiver firmware major version * Major version of the receiver's firmware * * 1: Receiver firmware minor version * Minor version of the receiver's firmware * * 2: Headset firmware major version * Major version of the headset's firmware * * 3: Headset firmware minor version * Minor version of the headset's firmware */ /* -------------------------------------------------------------------------- */ #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/cleanup.h> #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/power_supply.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <asm/byteorder.h> #include "hid-ids.h" #define CORSAIR_VOID_DEVICE(id, type) { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, (id)), \ .driver_data = (type) } #define CORSAIR_VOID_WIRELESS_DEVICE(id) CORSAIR_VOID_DEVICE((id), CORSAIR_VOID_WIRELESS) #define CORSAIR_VOID_WIRED_DEVICE(id) CORSAIR_VOID_DEVICE((id), CORSAIR_VOID_WIRED) #define CORSAIR_VOID_STATUS_REQUEST_ID 0xC9 #define CORSAIR_VOID_NOTIF_REQUEST_ID 0xCA #define CORSAIR_VOID_SIDETONE_REQUEST_ID 0xFF #define CORSAIR_VOID_STATUS_REPORT_ID 0x64 #define CORSAIR_VOID_FIRMWARE_REPORT_ID 0x66 #define CORSAIR_VOID_USB_SIDETONE_REQUEST 0x1 #define CORSAIR_VOID_USB_SIDETONE_REQUEST_TYPE 0x21 #define CORSAIR_VOID_USB_SIDETONE_VALUE 0x200 #define CORSAIR_VOID_USB_SIDETONE_INDEX 0xB00 #define CORSAIR_VOID_MIC_MASK GENMASK(7, 7) #define CORSAIR_VOID_CAPACITY_MASK GENMASK(6, 0) #define CORSAIR_VOID_WIRELESS_CONNECTED 177 #define CORSAIR_VOID_SIDETONE_MAX_WIRELESS 55 #define CORSAIR_VOID_SIDETONE_MAX_WIRED 4096 enum { CORSAIR_VOID_WIRELESS, CORSAIR_VOID_WIRED, }; enum { CORSAIR_VOID_BATTERY_NORMAL = 1, CORSAIR_VOID_BATTERY_LOW = 2, CORSAIR_VOID_BATTERY_CRITICAL = 3, CORSAIR_VOID_BATTERY_CHARGED = 4, CORSAIR_VOID_BATTERY_CHARGING = 5, }; static
enum power_supply_property corsair_void_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; struct corsair_void_battery_data { int status; bool present; int capacity; int capacity_level; }; struct corsair_void_drvdata { struct hid_device *hid_dev; struct device *dev; char *name; bool is_wired; unsigned int sidetone_max; struct corsair_void_battery_data battery_data; bool mic_up; bool connected; int fw_receiver_major; int fw_receiver_minor; int fw_headset_major; int fw_headset_minor; struct power_supply *battery; struct power_supply_desc battery_desc; struct mutex battery_mutex; struct delayed_work delayed_status_work; struct delayed_work delayed_firmware_work; struct work_struct battery_remove_work; struct work_struct battery_add_work; }; /* * Functions to process receiver data */ static void corsair_void_set_wireless_status(struct corsair_void_drvdata *drvdata) { struct usb_interface *usb_if = to_usb_interface(drvdata->dev->parent); if (drvdata->is_wired) return; usb_set_wireless_status(usb_if, drvdata->connected ? USB_WIRELESS_STATUS_CONNECTED : USB_WIRELESS_STATUS_DISCONNECTED); } static void corsair_void_set_unknown_batt(struct corsair_void_drvdata *drvdata) { struct corsair_void_battery_data *battery_data = &drvdata->battery_data; battery_data->status = POWER_SUPPLY_STATUS_UNKNOWN; battery_data->present = false; battery_data->capacity = 0; battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; } /* Reset data that may change between wireless connections */ static void corsair_void_set_unknown_wireless_data(struct corsair_void_drvdata *drvdata) { /* Only 0 out headset, receiver is always known if relevant */ drvdata->fw_headset_major = 0; drvdata->fw_headset_minor = 0; drvdata->connected = false; drvdata->mic_up = false; corsair_void_set_wireless_status(drvdata); } static void corsair_void_process_receiver(struct corsair_void_drvdata *drvdata, int raw_battery_capacity, int raw_connection_status, int raw_battery_status) { struct corsair_void_battery_data *battery_data = &drvdata->battery_data; struct corsair_void_battery_data orig_battery_data; /* Save initial battery data, to compare later */ orig_battery_data = *battery_data; /* Headset not connected, or it's wired */ if (raw_connection_status != CORSAIR_VOID_WIRELESS_CONNECTED) goto unknown_battery; /* Battery information unavailable */ if (raw_battery_status == 0) goto unknown_battery; /* Battery must be connected then */ battery_data->present = true; battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; /* Set battery status */ switch (raw_battery_status) { case CORSAIR_VOID_BATTERY_NORMAL: case CORSAIR_VOID_BATTERY_LOW: case CORSAIR_VOID_BATTERY_CRITICAL: battery_data->status = POWER_SUPPLY_STATUS_DISCHARGING; if (raw_battery_status == CORSAIR_VOID_BATTERY_LOW) battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (raw_battery_status == CORSAIR_VOID_BATTERY_CRITICAL) battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; break; case CORSAIR_VOID_BATTERY_CHARGED: battery_data->status = POWER_SUPPLY_STATUS_FULL; break; case CORSAIR_VOID_BATTERY_CHARGING: battery_data->status = POWER_SUPPLY_STATUS_CHARGING; break; default: hid_warn(drvdata->hid_dev, "unknown battery status '%d'", raw_battery_status); goto unknown_battery; break; } battery_data->capacity = raw_battery_capacity; 
corsair_void_set_wireless_status(drvdata); goto success; unknown_battery: corsair_void_set_unknown_batt(drvdata); success: /* Inform power supply if battery values changed */ if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) { scoped_guard(mutex, &drvdata->battery_mutex) { if (drvdata->battery) { power_supply_changed(drvdata->battery); } } } } /* * Functions to report stored data */ static int corsair_void_battery_get_property(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) { struct corsair_void_drvdata *drvdata = power_supply_get_drvdata(psy); switch (prop) { case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_MODEL_NAME: if (!strncmp(drvdata->hid_dev->name, "Corsair ", 8)) val->strval = drvdata->hid_dev->name + 8; else val->strval = drvdata->hid_dev->name; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "Corsair"; break; case POWER_SUPPLY_PROP_STATUS: val->intval = drvdata->battery_data.status; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = drvdata->battery_data.present; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = drvdata->battery_data.capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = drvdata->battery_data.capacity_level; break; default: return -EINVAL; } return 0; } static ssize_t microphone_up_show(struct device *dev, struct device_attribute *attr, char *buf) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); if (!drvdata->connected) return -ENODEV; return sysfs_emit(buf, "%d\n", drvdata->mic_up); } static ssize_t fw_version_receiver_show(struct device *dev, struct device_attribute *attr, char *buf) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); if (drvdata->fw_receiver_major == 0 && drvdata->fw_receiver_minor == 0) return -ENODATA; return sysfs_emit(buf, "%d.%02d\n", drvdata->fw_receiver_major, drvdata->fw_receiver_minor); } static ssize_t fw_version_headset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); if (drvdata->fw_headset_major == 0 && drvdata->fw_headset_minor == 0) return -ENODATA; return sysfs_emit(buf, "%d.%02d\n", drvdata->fw_headset_major, drvdata->fw_headset_minor); } static ssize_t sidetone_max_show(struct device *dev, struct device_attribute *attr, char *buf) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); return sysfs_emit(buf, "%d\n", drvdata->sidetone_max); } /* * Functions to send data to headset */ static ssize_t send_alert_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); struct hid_device *hid_dev = drvdata->hid_dev; unsigned char alert_id; unsigned char *send_buf __free(kfree) = NULL; int ret; if (!drvdata->connected || drvdata->is_wired) return -ENODEV; /* Only accept 0 or 1 for alert ID */ if (kstrtou8(buf, 10, &alert_id) || alert_id >= 2) return -EINVAL; send_buf = kmalloc(3, GFP_KERNEL); if (!send_buf) return -ENOMEM; /* Packet format to send alert with ID alert_id */ send_buf[0] = CORSAIR_VOID_NOTIF_REQUEST_ID; send_buf[1] = 0x02; send_buf[2] = alert_id; ret = hid_hw_raw_request(hid_dev, CORSAIR_VOID_NOTIF_REQUEST_ID, send_buf, 3, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); if (ret < 0) hid_warn(hid_dev, "failed to send alert request (reason: %d)", ret); else ret = count; return ret; } static int corsair_void_set_sidetone_wired(struct device *dev, const char *buf, unsigned int 
sidetone) { struct usb_interface *usb_if = to_usb_interface(dev->parent); struct usb_device *usb_dev = interface_to_usbdev(usb_if); /* Packet format to set sidetone for wired headsets */ __le16 sidetone_le = cpu_to_le16(sidetone); return usb_control_msg_send(usb_dev, 0, CORSAIR_VOID_USB_SIDETONE_REQUEST, CORSAIR_VOID_USB_SIDETONE_REQUEST_TYPE, CORSAIR_VOID_USB_SIDETONE_VALUE, CORSAIR_VOID_USB_SIDETONE_INDEX, &sidetone_le, 2, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int corsair_void_set_sidetone_wireless(struct device *dev, const char *buf, unsigned char sidetone) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); struct hid_device *hid_dev = drvdata->hid_dev; unsigned char *send_buf __free(kfree) = NULL; send_buf = kmalloc(12, GFP_KERNEL); if (!send_buf) return -ENOMEM; /* Packet format to set sidetone for wireless headsets */ send_buf[0] = CORSAIR_VOID_SIDETONE_REQUEST_ID; send_buf[1] = 0x0B; send_buf[2] = 0x00; send_buf[3] = 0xFF; send_buf[4] = 0x04; send_buf[5] = 0x0E; send_buf[6] = 0xFF; send_buf[7] = 0x05; send_buf[8] = 0x01; send_buf[9] = 0x04; send_buf[10] = 0x00; send_buf[11] = sidetone + 200; return hid_hw_raw_request(hid_dev, CORSAIR_VOID_SIDETONE_REQUEST_ID, send_buf, 12, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); } static ssize_t set_sidetone_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev); struct hid_device *hid_dev = drvdata->hid_dev; unsigned int sidetone; int ret; if (!drvdata->connected) return -ENODEV; /* sidetone must be between 0 and drvdata->sidetone_max inclusive */ if (kstrtouint(buf, 10, &sidetone) || sidetone > drvdata->sidetone_max) return -EINVAL; if (drvdata->is_wired) ret = corsair_void_set_sidetone_wired(dev, buf, sidetone); else ret = corsair_void_set_sidetone_wireless(dev, buf, sidetone); if (ret < 0) hid_warn(hid_dev, "failed to send sidetone (reason: %d)", ret); else ret = count; return ret; } static int corsair_void_request_status(struct hid_device *hid_dev, int id) { unsigned char *send_buf __free(kfree) = NULL; send_buf = kmalloc(2, GFP_KERNEL); if (!send_buf) return -ENOMEM; /* Packet format to request data item (status / firmware) refresh */ send_buf[0] = CORSAIR_VOID_STATUS_REQUEST_ID; send_buf[1] = id; /* Send request for data refresh */ return hid_hw_raw_request(hid_dev, CORSAIR_VOID_STATUS_REQUEST_ID, send_buf, 2, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } /* * Headset connect / disconnect handlers and work handlers */ static void corsair_void_status_work_handler(struct work_struct *work) { struct corsair_void_drvdata *drvdata; struct delayed_work *delayed_work; int battery_ret; delayed_work = container_of(work, struct delayed_work, work); drvdata = container_of(delayed_work, struct corsair_void_drvdata, delayed_status_work); battery_ret = corsair_void_request_status(drvdata->hid_dev, CORSAIR_VOID_STATUS_REPORT_ID); if (battery_ret < 0) { hid_warn(drvdata->hid_dev, "failed to request battery (reason: %d)", battery_ret); } } static void corsair_void_firmware_work_handler(struct work_struct *work) { struct corsair_void_drvdata *drvdata; struct delayed_work *delayed_work; int firmware_ret; delayed_work = container_of(work, struct delayed_work, work); drvdata = container_of(delayed_work, struct corsair_void_drvdata, delayed_firmware_work); firmware_ret = corsair_void_request_status(drvdata->hid_dev, CORSAIR_VOID_FIRMWARE_REPORT_ID); if (firmware_ret < 0) { hid_warn(drvdata->hid_dev, "failed to request firmware (reason: %d)", firmware_ret); 
} } static void corsair_void_battery_remove_work_handler(struct work_struct *work) { struct corsair_void_drvdata *drvdata; drvdata = container_of(work, struct corsair_void_drvdata, battery_remove_work); scoped_guard(mutex, &drvdata->battery_mutex) { if (drvdata->battery) { power_supply_unregister(drvdata->battery); drvdata->battery = NULL; } } } static void corsair_void_battery_add_work_handler(struct work_struct *work) { struct corsair_void_drvdata *drvdata; struct power_supply_config psy_cfg; struct power_supply *new_supply; drvdata = container_of(work, struct corsair_void_drvdata, battery_add_work); guard(mutex)(&drvdata->battery_mutex); if (drvdata->battery) return; psy_cfg.drv_data = drvdata; new_supply = power_supply_register(drvdata->dev, &drvdata->battery_desc, &psy_cfg); if (IS_ERR(new_supply)) { hid_err(drvdata->hid_dev, "failed to register battery '%s' (reason: %ld)\n", drvdata->battery_desc.name, PTR_ERR(new_supply)); return; } if (power_supply_powers(new_supply, drvdata->dev)) { power_supply_unregister(new_supply); return; } drvdata->battery = new_supply; } static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata) { schedule_work(&drvdata->battery_add_work); schedule_delayed_work(&drvdata->delayed_firmware_work, msecs_to_jiffies(100)); } static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata) { schedule_work(&drvdata->battery_remove_work); corsair_void_set_unknown_wireless_data(drvdata); corsair_void_set_unknown_batt(drvdata); } /* * Driver setup, probing and HID event handling */ static DEVICE_ATTR_RO(fw_version_receiver); static DEVICE_ATTR_RO(fw_version_headset); static DEVICE_ATTR_RO(microphone_up); static DEVICE_ATTR_RO(sidetone_max); static DEVICE_ATTR_WO(send_alert); static DEVICE_ATTR_WO(set_sidetone); static struct attribute *corsair_void_attrs[] = { &dev_attr_fw_version_receiver.attr, &dev_attr_fw_version_headset.attr, &dev_attr_microphone_up.attr, &dev_attr_send_alert.attr, &dev_attr_set_sidetone.attr, &dev_attr_sidetone_max.attr, NULL, }; static const struct attribute_group corsair_void_attr_group = { .attrs = corsair_void_attrs, }; static int corsair_void_probe(struct hid_device *hid_dev, const struct hid_device_id *hid_id) { int ret; struct corsair_void_drvdata *drvdata; char *name; if (!hid_is_usb(hid_dev)) return -EINVAL; drvdata = devm_kzalloc(&hid_dev->dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; hid_set_drvdata(hid_dev, drvdata); dev_set_drvdata(&hid_dev->dev, drvdata); drvdata->dev = &hid_dev->dev; drvdata->hid_dev = hid_dev; drvdata->is_wired = hid_id->driver_data == CORSAIR_VOID_WIRED; drvdata->sidetone_max = CORSAIR_VOID_SIDETONE_MAX_WIRELESS; if (drvdata->is_wired) drvdata->sidetone_max = CORSAIR_VOID_SIDETONE_MAX_WIRED; /* Set initial values for no wireless headset attached */ /* If a headset is attached, it'll be prompted later */ corsair_void_set_unknown_wireless_data(drvdata); corsair_void_set_unknown_batt(drvdata); /* Receiver version won't be reset after init */ /* Headset version already set via set_unknown_wireless_data */ drvdata->fw_receiver_major = 0; drvdata->fw_receiver_minor = 0; ret = hid_parse(hid_dev); if (ret) { hid_err(hid_dev, "parse failed (reason: %d)\n", ret); return ret; } name = devm_kasprintf(drvdata->dev, GFP_KERNEL, "corsair-void-%d-battery", hid_dev->id); if (!name) return -ENOMEM; drvdata->battery_desc.name = name; drvdata->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; drvdata->battery_desc.properties = corsair_void_battery_props; 
drvdata->battery_desc.num_properties = ARRAY_SIZE(corsair_void_battery_props); drvdata->battery_desc.get_property = corsair_void_battery_get_property; drvdata->battery = NULL; INIT_WORK(&drvdata->battery_remove_work, corsair_void_battery_remove_work_handler); INIT_WORK(&drvdata->battery_add_work, corsair_void_battery_add_work_handler); ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex); if (ret) return ret; ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group); if (ret) return ret; /* Any failures after here will need to call hid_hw_stop */ ret = hid_hw_start(hid_dev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hid_dev, "hid_hw_start failed (reason: %d)\n", ret); goto failed_after_sysfs; } /* Refresh battery data, in case wireless headset is already connected */ INIT_DELAYED_WORK(&drvdata->delayed_status_work, corsair_void_status_work_handler); schedule_delayed_work(&drvdata->delayed_status_work, msecs_to_jiffies(100)); /* Refresh firmware versions */ INIT_DELAYED_WORK(&drvdata->delayed_firmware_work, corsair_void_firmware_work_handler); schedule_delayed_work(&drvdata->delayed_firmware_work, msecs_to_jiffies(100)); return 0; failed_after_sysfs: sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group); return ret; } static void corsair_void_remove(struct hid_device *hid_dev) { struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev); hid_hw_stop(hid_dev); cancel_work_sync(&drvdata->battery_remove_work); cancel_work_sync(&drvdata->battery_add_work); if (drvdata->battery) power_supply_unregister(drvdata->battery); cancel_delayed_work_sync(&drvdata->delayed_firmware_work); sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group); } static int corsair_void_raw_event(struct hid_device *hid_dev, struct hid_report *hid_report, u8 *data, int size) { struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev); bool was_connected = drvdata->connected; /* Description of packets are documented at the top of this file */ if (hid_report->id == CORSAIR_VOID_STATUS_REPORT_ID) { drvdata->mic_up = FIELD_GET(CORSAIR_VOID_MIC_MASK, data[2]); drvdata->connected = (data[3] == CORSAIR_VOID_WIRELESS_CONNECTED) || drvdata->is_wired; corsair_void_process_receiver(drvdata, FIELD_GET(CORSAIR_VOID_CAPACITY_MASK, data[2]), data[3], data[4]); } else if (hid_report->id == CORSAIR_VOID_FIRMWARE_REPORT_ID) { drvdata->fw_receiver_major = data[1]; drvdata->fw_receiver_minor = data[2]; drvdata->fw_headset_major = data[3]; drvdata->fw_headset_minor = data[4]; } /* Handle wireless headset connect / disconnect */ if ((was_connected != drvdata->connected) && !drvdata->is_wired) { if (drvdata->connected) corsair_void_headset_connected(drvdata); else corsair_void_headset_disconnected(drvdata); } return 0; } static const struct hid_device_id corsair_void_devices[] = { /* Corsair Void Wireless */ CORSAIR_VOID_WIRELESS_DEVICE(0x0a0c), CORSAIR_VOID_WIRELESS_DEVICE(0x0a2b), CORSAIR_VOID_WIRELESS_DEVICE(0x1b23), CORSAIR_VOID_WIRELESS_DEVICE(0x1b25), CORSAIR_VOID_WIRELESS_DEVICE(0x1b27), /* Corsair Void USB */ CORSAIR_VOID_WIRED_DEVICE(0x0a0f), CORSAIR_VOID_WIRED_DEVICE(0x1b1c), CORSAIR_VOID_WIRED_DEVICE(0x1b29), CORSAIR_VOID_WIRED_DEVICE(0x1b2a), /* Corsair Void Surround */ CORSAIR_VOID_WIRED_DEVICE(0x0a30), CORSAIR_VOID_WIRED_DEVICE(0x0a31), /* Corsair Void Pro Wireless */ CORSAIR_VOID_WIRELESS_DEVICE(0x0a14), CORSAIR_VOID_WIRELESS_DEVICE(0x0a16), CORSAIR_VOID_WIRELESS_DEVICE(0x0a1a), /* Corsair Void Pro USB */ CORSAIR_VOID_WIRED_DEVICE(0x0a17), CORSAIR_VOID_WIRED_DEVICE(0x0a1d), /* 
Corsair Void Pro Surround */ CORSAIR_VOID_WIRED_DEVICE(0x0a18), CORSAIR_VOID_WIRED_DEVICE(0x0a1e), CORSAIR_VOID_WIRED_DEVICE(0x0a1f), /* Corsair Void Elite Wireless */ CORSAIR_VOID_WIRELESS_DEVICE(0x0a51), CORSAIR_VOID_WIRELESS_DEVICE(0x0a55), CORSAIR_VOID_WIRELESS_DEVICE(0x0a75), /* Corsair Void Elite USB */ CORSAIR_VOID_WIRED_DEVICE(0x0a52), CORSAIR_VOID_WIRED_DEVICE(0x0a56), /* Corsair Void Elite Surround */ CORSAIR_VOID_WIRED_DEVICE(0x0a53), CORSAIR_VOID_WIRED_DEVICE(0x0a57), {} }; MODULE_DEVICE_TABLE(hid, corsair_void_devices); static struct hid_driver corsair_void_driver = { .name = "hid-corsair-void", .id_table = corsair_void_devices, .probe = corsair_void_probe, .remove = corsair_void_remove, .raw_event = corsair_void_raw_event, }; module_hid_driver(corsair_void_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Stuart Hayhurst <stuart.a.hayhurst@gmail.com>"); MODULE_DESCRIPTION("HID driver for Corsair Void headsets");
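The driver above exposes its controls as sysfs attributes on the HID device (sidetone_max, set_sidetone, send_alert, microphone_up, fw_version_receiver, fw_version_headset) via corsair_void_attr_group. The sketch below is an illustrative userspace use of two of those attributes; the HID device's sysfs directory is an assumption here (the exact path, somewhere under /sys/bus/hid/devices/, varies per system), so it is passed in as an argument.

/*
 * Illustrative userspace example for the sysfs attributes registered in
 * corsair_void_attr_group. The attribute directory is the HID device's
 * sysfs node (path varies per system), passed in as argv[1] here.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[512];
	unsigned int max = 0, value;
	FILE *f;

	if (argc < 3) {
		fprintf(stderr, "usage: %s <hid-sysfs-dir> <sidetone>\n", argv[0]);
		return 1;
	}
	value = (unsigned int)strtoul(argv[2], NULL, 10);

	/* sidetone_max reports 55 for wireless and 4096 for wired headsets */
	snprintf(path, sizeof(path), "%s/sidetone_max", argv[1]);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%u", &max) != 1) {
		fprintf(stderr, "failed to read %s\n", path);
		return 1;
	}
	fclose(f);

	if (value > max) {
		fprintf(stderr, "sidetone %u exceeds maximum %u\n", value, max);
		return 1;
	}

	/* set_sidetone rejects out-of-range values with -EINVAL */
	snprintf(path, sizeof(path), "%s/set_sidetone", argv[1]);
	f = fopen(path, "w");
	if (!f || fprintf(f, "%u\n", value) < 0) {
		fprintf(stderr, "failed to write %s\n", path);
		return 1;
	}
	fclose(f);
	return 0;
}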
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2008 ioogle, Inc. All rights reserved. * * Libata transport class.
* * The ATA transport class contains common code to deal with ATA HBAs, * an approximated representation of ATA topologies in the driver model, * and various sysfs attributes to expose these topologies and management * interfaces to user-space. * * There are 3 objects defined in this class: * - ata_port * - ata_link * - ata_device * Each port has a link object. Each link can have up to two devices for PATA * and generally one for SATA. * If there is a SATA port multiplier [PMP], 15 additional ata_link objects are * created. * * These objects are created when the ata host is initialized and when a PMP is * found. They are removed only when the HBA is removed, cleaned before the * error handler runs. */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <scsi/scsi_transport.h> #include <linux/libata.h> #include <linux/hdreg.h> #include <linux/uaccess.h> #include <linux/pm_runtime.h> #include "libata.h" #include "libata-transport.h" #define ATA_PORT_ATTRS 3 #define ATA_LINK_ATTRS 3 #define ATA_DEV_ATTRS 9 struct scsi_transport_template; struct scsi_transport_template *ata_scsi_transport_template; struct ata_internal { struct scsi_transport_template t; struct device_attribute private_port_attrs[ATA_PORT_ATTRS]; struct device_attribute private_link_attrs[ATA_LINK_ATTRS]; struct device_attribute private_dev_attrs[ATA_DEV_ATTRS]; struct transport_container link_attr_cont; struct transport_container dev_attr_cont; /* * The array of null terminated pointers to attributes * needed by scsi_sysfs.c */ struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1]; struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1]; struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1]; }; #define to_ata_internal(tmpl) container_of(tmpl, struct ata_internal, t) #define tdev_to_device(d) \ container_of((d), struct ata_device, tdev) #define transport_class_to_dev(dev) \ tdev_to_device((dev)->parent) #define tdev_to_link(d) \ container_of((d), struct ata_link, tdev) #define transport_class_to_link(dev) \ tdev_to_link((dev)->parent) #define tdev_to_port(d) \ container_of((d), struct ata_port, tdev) #define transport_class_to_port(dev) \ tdev_to_port((dev)->parent) /* * Hack to allow attributes of the same name in different objects.
*/ #define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute device_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) #define ata_bitfield_name_match(title, table) \ static ssize_t \ get_ata_##title##_names(u32 table_key, char *buf) \ { \ char *prefix = ""; \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value & table_key) { \ len += sprintf(buf + len, "%s%s", \ prefix, table[i].name); \ prefix = ", "; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } #define ata_bitfield_name_search(title, table) \ static ssize_t \ get_ata_##title##_names(u32 table_key, char *buf) \ { \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value == table_key) { \ len += sprintf(buf + len, "%s", \ table[i].name); \ break; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } static struct { u32 value; char *name; } ata_class_names[] = { { ATA_DEV_UNKNOWN, "unknown" }, { ATA_DEV_ATA, "ata" }, { ATA_DEV_ATA_UNSUP, "ata" }, { ATA_DEV_ATAPI, "atapi" }, { ATA_DEV_ATAPI_UNSUP, "atapi" }, { ATA_DEV_PMP, "pmp" }, { ATA_DEV_PMP_UNSUP, "pmp" }, { ATA_DEV_SEMB, "semb" }, { ATA_DEV_SEMB_UNSUP, "semb" }, { ATA_DEV_ZAC, "zac" }, { ATA_DEV_NONE, "none" } }; ata_bitfield_name_search(class, ata_class_names) static struct { u32 value; char *name; } ata_err_names[] = { { AC_ERR_DEV, "DeviceError" }, { AC_ERR_HSM, "HostStateMachineError" }, { AC_ERR_TIMEOUT, "Timeout" }, { AC_ERR_MEDIA, "MediaError" }, { AC_ERR_ATA_BUS, "BusError" }, { AC_ERR_HOST_BUS, "HostBusError" }, { AC_ERR_SYSTEM, "SystemError" }, { AC_ERR_INVALID, "InvalidArg" }, { AC_ERR_OTHER, "Unknown" }, { AC_ERR_NODEV_HINT, "NoDeviceHint" }, { AC_ERR_NCQ, "NCQError" } }; ata_bitfield_name_match(err, ata_err_names) static struct { u32 value; char *name; } ata_xfer_names[] = { { XFER_UDMA_7, "XFER_UDMA_7" }, { XFER_UDMA_6, "XFER_UDMA_6" }, { XFER_UDMA_5, "XFER_UDMA_5" }, { XFER_UDMA_4, "XFER_UDMA_4" }, { XFER_UDMA_3, "XFER_UDMA_3" }, { XFER_UDMA_2, "XFER_UDMA_2" }, { XFER_UDMA_1, "XFER_UDMA_1" }, { XFER_UDMA_0, "XFER_UDMA_0" }, { XFER_MW_DMA_4, "XFER_MW_DMA_4" }, { XFER_MW_DMA_3, "XFER_MW_DMA_3" }, { XFER_MW_DMA_2, "XFER_MW_DMA_2" }, { XFER_MW_DMA_1, "XFER_MW_DMA_1" }, { XFER_MW_DMA_0, "XFER_MW_DMA_0" }, { XFER_SW_DMA_2, "XFER_SW_DMA_2" }, { XFER_SW_DMA_1, "XFER_SW_DMA_1" }, { XFER_SW_DMA_0, "XFER_SW_DMA_0" }, { XFER_PIO_6, "XFER_PIO_6" }, { XFER_PIO_5, "XFER_PIO_5" }, { XFER_PIO_4, "XFER_PIO_4" }, { XFER_PIO_3, "XFER_PIO_3" }, { XFER_PIO_2, "XFER_PIO_2" }, { XFER_PIO_1, "XFER_PIO_1" }, { XFER_PIO_0, "XFER_PIO_0" }, { XFER_PIO_SLOW, "XFER_PIO_SLOW" } }; ata_bitfield_name_search(xfer, ata_xfer_names) /* * ATA Port attributes */ #define ata_port_show_simple(field, name, format_string, cast) \ static ssize_t \ show_ata_port_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_port *ap = transport_class_to_port(dev); \ \ return scnprintf(buf, 20, format_string, cast ap->field); \ } #define ata_port_simple_attr(field, name, format_string, type) \ ata_port_show_simple(field, name, format_string, (type)) \ static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL) ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int); ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long); /* We want the port_no sysfs attibute to start at 1 (ap->port_no starts at 0) */ ata_port_simple_attr(port_no + 1, port_no, "%u\n", unsigned int); static DECLARE_TRANSPORT_CLASS(ata_port_class, "ata_port", NULL, 
NULL, NULL); static void ata_tport_release(struct device *dev) { struct ata_port *ap = tdev_to_port(dev); ata_host_put(ap->host); } /** * ata_is_port -- check if a struct device represents an ATA port * @dev: device to check * * Returns: * %1 if the device represents an ATA Port, %0 else */ static int ata_is_port(const struct device *dev) { return dev->release == ata_tport_release; } static int ata_tport_match(struct attribute_container *cont, struct device *dev) { if (!ata_is_port(dev)) return 0; return &ata_scsi_transport_template->host_attrs.ac == cont; } /** * ata_tport_delete -- remove ATA PORT * @ap: ATA PORT to remove * * Removes the specified ATA PORT. Remove the associated link as well. */ void ata_tport_delete(struct ata_port *ap) { struct device *dev = &ap->tdev; ata_tlink_delete(&ap->link); transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL_GPL(ata_tport_delete); static const struct device_type ata_port_sas_type = { .name = ATA_PORT_TYPE_NAME, }; /** ata_tport_add - initialize a transport ATA port structure * * @parent: parent device * @ap: existing ata_port structure * * Initialize an ATA port structure for sysfs. It will be added to the device * tree below the device specified by @parent which could be a PCI device. * * Returns %0 on success */ int ata_tport_add(struct device *parent, struct ata_port *ap) { int error; struct device *dev = &ap->tdev; device_initialize(dev); if (ap->flags & ATA_FLAG_SAS_HOST) dev->type = &ata_port_sas_type; else dev->type = &ata_port_type; dev->parent = parent; ata_host_get(ap->host); dev->release = ata_tport_release; dev_set_name(dev, "ata%d", ap->print_id); transport_setup_device(dev); ata_acpi_bind_port(ap); error = device_add(dev); if (error) { goto tport_err; } device_enable_async_suspend(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_forbid(dev); error = transport_add_device(dev); if (error) goto tport_transport_add_err; transport_configure_device(dev); error = ata_tlink_add(&ap->link); if (error) { goto tport_link_err; } return 0; tport_link_err: transport_remove_device(dev); tport_transport_add_err: device_del(dev); tport_err: transport_destroy_device(dev); put_device(dev); return error; } EXPORT_SYMBOL_GPL(ata_tport_add); /** * ata_port_classify - determine device type based on ATA-spec signature * @ap: ATA port device on which the classification should be run * @tf: ATA taskfile register set for device to be identified * * A wrapper around ata_dev_classify() to provide additional logging * * RETURNS: * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP, * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
*/ unsigned int ata_port_classify(struct ata_port *ap, const struct ata_taskfile *tf) { int i; unsigned int class = ata_dev_classify(tf); /* Start with index '1' to skip the 'unknown' entry */ for (i = 1; i < ARRAY_SIZE(ata_class_names); i++) { if (ata_class_names[i].value == class) { ata_port_dbg(ap, "found %s device by sig\n", ata_class_names[i].name); return class; } } ata_port_info(ap, "found unknown device (class %u)\n", class); return class; } EXPORT_SYMBOL_GPL(ata_port_classify); /* * ATA device attributes */ #define ata_dev_show_class(title, field) \ static ssize_t \ show_ata_dev_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ return get_ata_##title##_names(ata_dev->field, buf); \ } #define ata_dev_attr(title, field) \ ata_dev_show_class(title, field) \ static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL) ata_dev_attr(class, class); ata_dev_attr(xfer, pio_mode); ata_dev_attr(xfer, dma_mode); ata_dev_attr(xfer, xfer_mode); #define ata_dev_show_simple(field, format_string, cast) \ static ssize_t \ show_ata_dev_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ return scnprintf(buf, 20, format_string, cast ata_dev->field); \ } #define ata_dev_simple_attr(field, format_string, type) \ ata_dev_show_simple(field, format_string, (type)) \ static DEVICE_ATTR(field, S_IRUGO, \ show_ata_dev_##field, NULL) ata_dev_simple_attr(spdn_cnt, "%d\n", int); struct ata_show_ering_arg { char* buf; int written; }; static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg) { struct ata_show_ering_arg* arg = void_arg; u64 seconds; u32 rem; seconds = div_u64_rem(ent->timestamp, HZ, &rem); arg->written += sprintf(arg->buf + arg->written, "[%5llu.%09lu]", seconds, rem * NSEC_PER_SEC / HZ); arg->written += get_ata_err_names(ent->err_mask, arg->buf + arg->written); return 0; } static ssize_t show_ata_dev_ering(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); struct ata_show_ering_arg arg = { buf, 0 }; ata_ering_map(&ata_dev->ering, ata_show_ering, &arg); return arg.written; } static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL); static ssize_t show_ata_dev_id(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); int written = 0, i = 0; if (ata_dev->class == ATA_DEV_PMP) return 0; for(i=0;i<ATA_ID_WORDS;i++) { written += scnprintf(buf+written, 20, "%04x%c", ata_dev->id[i], ((i+1) & 7) ? ' ' : '\n'); } return written; } static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL); static ssize_t show_ata_dev_gscr(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); int written = 0, i = 0; if (ata_dev->class != ATA_DEV_PMP) return 0; for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) { written += scnprintf(buf+written, 20, "%08x%c", ata_dev->gscr[i], ((i+1) & 3) ? 
' ' : '\n'); } if (SATA_PMP_GSCR_DWORDS & 3) buf[written-1] = '\n'; return written; } static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL); static ssize_t show_ata_dev_trim(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); unsigned char *mode; if (!ata_id_has_trim(ata_dev->id)) mode = "unsupported"; else if (ata_dev->quirks & ATA_QUIRK_NOTRIM) mode = "forced_unsupported"; else if (ata_dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) mode = "forced_unqueued"; else if (ata_fpdma_dsm_supported(ata_dev)) mode = "queued"; else mode = "unqueued"; return scnprintf(buf, 20, "%s\n", mode); } static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL); static DECLARE_TRANSPORT_CLASS(ata_dev_class, "ata_device", NULL, NULL, NULL); static void ata_tdev_release(struct device *dev) { } /** * ata_is_ata_dev -- check if a struct device represents a ATA device * @dev: device to check * * Returns: * true if the device represents a ATA device, false otherwise */ static bool ata_is_ata_dev(const struct device *dev) { return dev->release == ata_tdev_release; } static int ata_tdev_match(struct attribute_container *cont, struct device *dev) { struct ata_internal *i = to_ata_internal(ata_scsi_transport_template); if (!ata_is_ata_dev(dev)) return 0; return &i->dev_attr_cont.ac == cont; } /** * ata_tdev_free -- free an ATA transport device * @dev: struct ata_device owning the transport device to free * * Free the ATA transport device for the specified ATA device. * * Note: * This function must only be called for a ATA transport device that has not * yet successfully been added using ata_tdev_add(). */ static void ata_tdev_free(struct ata_device *dev) { transport_destroy_device(&dev->tdev); put_device(&dev->tdev); } /** * ata_tdev_delete -- remove an ATA transport device * @ata_dev: struct ata_device owning the transport device to delete * * Removes the ATA transport device for the specified ATA device. */ static void ata_tdev_delete(struct ata_device *ata_dev) { struct device *dev = &ata_dev->tdev; transport_remove_device(dev); device_del(dev); ata_tdev_free(ata_dev); } /** * ata_tdev_add -- initialize an ATA transport device * @ata_dev: struct ata_device owning the transport device to add * * Initialize an ATA transport device for sysfs. It will be added in the * device tree below the ATA link device it belongs to. * * Returns %0 on success and a negative error code on error. 
*/ static int ata_tdev_add(struct ata_device *ata_dev) { struct device *dev = &ata_dev->tdev; struct ata_link *link = ata_dev->link; struct ata_port *ap = link->ap; int error; device_initialize(dev); dev->parent = &link->tdev; dev->release = ata_tdev_release; if (ata_is_host_link(link)) dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); else dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp); transport_setup_device(dev); ata_acpi_bind_dev(ata_dev); error = device_add(dev); if (error) { ata_tdev_free(ata_dev); return error; } error = transport_add_device(dev); if (error) { device_del(dev); ata_tdev_free(ata_dev); return error; } transport_configure_device(dev); return 0; } /* * ATA link attributes */ static int noop(int x) { return x; } #define ata_link_show_linkspeed(field, format) \ static ssize_t \ show_ata_link_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_link *link = transport_class_to_link(dev); \ \ return sprintf(buf, "%s\n", \ sata_spd_string(format(link->field))); \ } #define ata_link_linkspeed_attr(field, format) \ ata_link_show_linkspeed(field, format) \ static DEVICE_ATTR(field, 0444, show_ata_link_##field, NULL) ata_link_linkspeed_attr(hw_sata_spd_limit, fls); ata_link_linkspeed_attr(sata_spd_limit, fls); ata_link_linkspeed_attr(sata_spd, noop); static DECLARE_TRANSPORT_CLASS(ata_link_class, "ata_link", NULL, NULL, NULL); static void ata_tlink_release(struct device *dev) { } /** * ata_is_link -- check if a struct device represents a ATA link * @dev: device to check * * Returns: * true if the device represents a ATA link, false otherwise */ static bool ata_is_link(const struct device *dev) { return dev->release == ata_tlink_release; } static int ata_tlink_match(struct attribute_container *cont, struct device *dev) { struct ata_internal *i = to_ata_internal(ata_scsi_transport_template); if (!ata_is_link(dev)) return 0; return &i->link_attr_cont.ac == cont; } /** * ata_tlink_delete -- remove an ATA link transport device * @link: struct ata_link owning the link transport device to remove * * Removes the link transport device of the specified ATA link. This also * removes the ATA device(s) associated with the link as well. */ void ata_tlink_delete(struct ata_link *link) { struct device *dev = &link->tdev; struct ata_device *ata_dev; ata_for_each_dev(ata_dev, link, ALL) { ata_tdev_delete(ata_dev); } transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } /** * ata_tlink_add -- initialize an ATA link transport device * @link: struct ata_link owning the link transport device to initialize * * Initialize an ATA link transport device for sysfs. It will be added in the * device tree below the ATA port it belongs to. * * Returns %0 on success and a negative error code on error. 
*/ int ata_tlink_add(struct ata_link *link) { struct device *dev = &link->tdev; struct ata_port *ap = link->ap; struct ata_device *ata_dev; int error; device_initialize(dev); dev->parent = &ap->tdev; dev->release = ata_tlink_release; if (ata_is_host_link(link)) dev_set_name(dev, "link%d", ap->print_id); else dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); transport_setup_device(dev); error = device_add(dev); if (error) goto tlink_err; error = transport_add_device(dev); if (error) goto tlink_transport_err; transport_configure_device(dev); ata_for_each_dev(ata_dev, link, ALL) { error = ata_tdev_add(ata_dev); if (error) goto tlink_dev_err; } return 0; tlink_dev_err: while (--ata_dev >= link->device) ata_tdev_delete(ata_dev); transport_remove_device(dev); tlink_transport_err: device_del(dev); tlink_err: transport_destroy_device(dev); put_device(dev); return error; } /* * Setup / Teardown code */ #define SETUP_TEMPLATE(attrb, field, perm, test) \ i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ #define SETUP_LINK_ATTRIBUTE(field) \ SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1) #define SETUP_PORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) #define SETUP_DEV_ATTRIBUTE(field) \ SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1) /** * ata_attach_transport -- instantiate ATA transport template */ struct scsi_transport_template *ata_attach_transport(void) { struct ata_internal *i; int count; i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL); if (!i) return NULL; i->t.eh_strategy_handler = ata_scsi_error; i->t.user_scan = ata_scsi_user_scan; i->t.host_attrs.ac.attrs = &i->port_attrs[0]; i->t.host_attrs.ac.class = &ata_port_class.class; i->t.host_attrs.ac.match = ata_tport_match; transport_container_register(&i->t.host_attrs); i->link_attr_cont.ac.class = &ata_link_class.class; i->link_attr_cont.ac.attrs = &i->link_attrs[0]; i->link_attr_cont.ac.match = ata_tlink_match; transport_container_register(&i->link_attr_cont); i->dev_attr_cont.ac.class = &ata_dev_class.class; i->dev_attr_cont.ac.attrs = &i->dev_attrs[0]; i->dev_attr_cont.ac.match = ata_tdev_match; transport_container_register(&i->dev_attr_cont); count = 0; SETUP_PORT_ATTRIBUTE(nr_pmp_links); SETUP_PORT_ATTRIBUTE(idle_irq); SETUP_PORT_ATTRIBUTE(port_no); BUG_ON(count > ATA_PORT_ATTRS); i->port_attrs[count] = NULL; count = 0; SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit); SETUP_LINK_ATTRIBUTE(sata_spd_limit); SETUP_LINK_ATTRIBUTE(sata_spd); BUG_ON(count > ATA_LINK_ATTRS); i->link_attrs[count] = NULL; count = 0; SETUP_DEV_ATTRIBUTE(class); SETUP_DEV_ATTRIBUTE(pio_mode); SETUP_DEV_ATTRIBUTE(dma_mode); SETUP_DEV_ATTRIBUTE(xfer_mode); SETUP_DEV_ATTRIBUTE(spdn_cnt); SETUP_DEV_ATTRIBUTE(ering); SETUP_DEV_ATTRIBUTE(id); SETUP_DEV_ATTRIBUTE(gscr); SETUP_DEV_ATTRIBUTE(trim); BUG_ON(count > ATA_DEV_ATTRS); i->dev_attrs[count] = NULL; return &i->t; } /** * ata_release_transport -- release ATA transport template instance * @t: transport template instance */ void ata_release_transport(struct scsi_transport_template *t) { struct ata_internal *i = to_ata_internal(t); transport_container_unregister(&i->t.host_attrs); transport_container_unregister(&i->link_attr_cont); transport_container_unregister(&i->dev_attr_cont); kfree(i); } __init int libata_transport_init(void) { int error; error = transport_class_register(&ata_link_class); if (error) goto out_unregister_transport; error = transport_class_register(&ata_port_class); if 
(error) goto out_unregister_link; error = transport_class_register(&ata_dev_class); if (error) goto out_unregister_port; return 0; out_unregister_port: transport_class_unregister(&ata_port_class); out_unregister_link: transport_class_unregister(&ata_link_class); out_unregister_transport: return error; } void __exit libata_transport_exit(void) { transport_class_unregister(&ata_link_class); transport_class_unregister(&ata_port_class); transport_class_unregister(&ata_dev_class); }
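A minimal illustration (not part of the source above) of what the SETUP_TEMPLATE helper expands to may help when reading ata_attach_transport(): invoking SETUP_LINK_ATTRIBUTE(sata_spd) copies the statically-declared device attribute into the template's private array and publishes a pointer to the copy, roughly as follows.

/* Illustrative expansion of SETUP_LINK_ATTRIBUTE(sata_spd); "i" is the
 * struct ata_internal being populated and "count" is the running index used
 * by ata_attach_transport().  The constant 1 comes from the macro's "test"
 * argument, so count is always advanced for link attributes.
 */
i->private_link_attrs[count] = dev_attr_sata_spd;	/* copy the attribute */
i->private_link_attrs[count].attr.mode = S_IRUGO;	/* force read-only mode */
i->link_attrs[count] = &i->private_link_attrs[count];	/* publish the copy */
if (1)
	count++;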
// SPDX-License-Identifier: GPL-2.0+ /*****************************************************************************/ /* * devio.c -- User space communication with USB devices. * * Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch) * * This file implements the usbfs/x/y files, where * x is the bus number and y the device number. * * It allows user space programs/"drivers" to communicate directly * with USB devices without intervening kernel driver.
* * Revision history * 22.12.1999 0.1 Initial release (split from proc_usb.c) * 04.01.2000 0.2 Turned into its own filesystem * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery * (CAN-2005-3055) */ /*****************************************************************************/ #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/usb/quirks.h> #include <linux/cdev.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/user_namespace.h> #include <linux/scatterlist.h> #include <linux/uaccess.h> #include <linux/dma-mapping.h> #include <asm/byteorder.h> #include <linux/moduleparam.h> #include "usb.h" #ifdef CONFIG_PM #define MAYBE_CAP_SUSPEND USBDEVFS_CAP_SUSPEND #else #define MAYBE_CAP_SUSPEND 0 #endif #define USB_MAXBUS 64 #define USB_DEVICE_MAX (USB_MAXBUS * 128) #define USB_SG_SIZE 16384 /* split-size for large txs */ /* Mutual exclusion for ps->list in resume vs. release and remove */ static DEFINE_MUTEX(usbfs_mutex); struct usb_dev_state { struct list_head list; /* state list */ struct usb_device *dev; struct file *file; spinlock_t lock; /* protects the async urb lists */ struct list_head async_pending; struct list_head async_completed; struct list_head memory_list; wait_queue_head_t wait; /* wake up if a request completed */ wait_queue_head_t wait_for_resume; /* wake up upon runtime resume */ unsigned int discsignr; struct pid *disc_pid; const struct cred *cred; sigval_t disccontext; unsigned long ifclaimed; u32 disabled_bulk_eps; unsigned long interface_allowed_mask; int not_yet_resumed; bool suspend_allowed; bool privileges_dropped; }; struct usb_memory { struct list_head memlist; int vma_use_count; int urb_use_count; u32 size; void *mem; dma_addr_t dma_handle; unsigned long vm_start; struct usb_dev_state *ps; }; struct async { struct list_head asynclist; struct usb_dev_state *ps; struct pid *pid; const struct cred *cred; unsigned int signr; unsigned int ifnum; void __user *userbuffer; void __user *userurb; sigval_t userurb_sigval; struct urb *urb; struct usb_memory *usbm; unsigned int mem_usage; int status; u8 bulk_addr; u8 bulk_status; }; static bool usbfs_snoop; module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic"); static unsigned usbfs_snoop_max = 65536; module_param(usbfs_snoop_max, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop_max, "maximum number of bytes to print while snooping"); #define snoop(dev, format, arg...) 
\ do { \ if (usbfs_snoop) \ dev_info(dev, format, ## arg); \ } while (0) enum snoop_when { SUBMIT, COMPLETE }; #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) /* Limit on the total amount of memory we can allocate for transfers */ static u32 usbfs_memory_mb = 16; module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); /* Hard limit, necessary to avoid arithmetic overflow */ #define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) static DEFINE_SPINLOCK(usbfs_memory_usage_lock); static u64 usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ static int usbfs_increase_memory_usage(u64 amount) { u64 lim, total_mem; unsigned long flags; int ret; lim = READ_ONCE(usbfs_memory_mb); lim <<= 20; ret = 0; spin_lock_irqsave(&usbfs_memory_usage_lock, flags); total_mem = usbfs_memory_usage + amount; if (lim > 0 && total_mem > lim) ret = -ENOMEM; else usbfs_memory_usage = total_mem; spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags); return ret; } /* Memory for a transfer is being deallocated */ static void usbfs_decrease_memory_usage(u64 amount) { unsigned long flags; spin_lock_irqsave(&usbfs_memory_usage_lock, flags); if (amount > usbfs_memory_usage) usbfs_memory_usage = 0; else usbfs_memory_usage -= amount; spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags); } static int connected(struct usb_dev_state *ps) { return (!list_empty(&ps->list) && ps->dev->state != USB_STATE_NOTATTACHED); } static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) { struct usb_dev_state *ps = usbm->ps; struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); unsigned long flags; spin_lock_irqsave(&ps->lock, flags); --*count; if (usbm->urb_use_count == 0 && usbm->vma_use_count == 0) { list_del(&usbm->memlist); spin_unlock_irqrestore(&ps->lock, flags); hcd_buffer_free_pages(hcd, usbm->size, usbm->mem, usbm->dma_handle); usbfs_decrease_memory_usage( usbm->size + sizeof(struct usb_memory)); kfree(usbm); } else { spin_unlock_irqrestore(&ps->lock, flags); } } static void usbdev_vm_open(struct vm_area_struct *vma) { struct usb_memory *usbm = vma->vm_private_data; unsigned long flags; spin_lock_irqsave(&usbm->ps->lock, flags); ++usbm->vma_use_count; spin_unlock_irqrestore(&usbm->ps->lock, flags); } static void usbdev_vm_close(struct vm_area_struct *vma) { struct usb_memory *usbm = vma->vm_private_data; dec_usb_memory_use_count(usbm, &usbm->vma_use_count); } static const struct vm_operations_struct usbdev_vm_ops = { .open = usbdev_vm_open, .close = usbdev_vm_close }; static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) { struct usb_memory *usbm = NULL; struct usb_dev_state *ps = file->private_data; struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; dma_addr_t dma_handle = DMA_MAPPING_ERROR; int ret; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); if (ret) goto error; usbm = kzalloc(sizeof(struct usb_memory), GFP_KERNEL); if (!usbm) { ret = -ENOMEM; goto error_decrease_mem; } mem = hcd_buffer_alloc_pages(hcd, size, GFP_USER | __GFP_NOWARN, &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; } memset(mem, 0, size); usbm->mem = mem; usbm->dma_handle = dma_handle; usbm->size = size; usbm->ps = ps; usbm->vm_start = vma->vm_start; usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); /* * In DMA-unavailable 
cases, hcd_buffer_alloc_pages allocates * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check * whether we are in such cases, and then use remap_pfn_range (or * dma_mmap_coherent) to map normal (or DMA) pages into the user * space, respectively. */ if (dma_handle == DMA_MAPPING_ERROR) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, size, vma->vm_page_prot) < 0) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } } else { if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); return -EAGAIN; } } vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); vma->vm_ops = &usbdev_vm_ops; vma->vm_private_data = usbm; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&usbm->memlist, &ps->memory_list); spin_unlock_irqrestore(&ps->lock, flags); return 0; error_free_usbm: kfree(usbm); error_decrease_mem: usbfs_decrease_memory_usage(size + sizeof(struct usb_memory)); error: return ret; } static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; ssize_t ret = 0; unsigned len; loff_t pos; int i; pos = *ppos; usb_lock_device(dev); if (!connected(ps)) { ret = -ENODEV; goto err; } else if (pos < 0) { ret = -EINVAL; goto err; } if (pos < sizeof(struct usb_device_descriptor)) { /* 18 bytes - fits on the stack */ struct usb_device_descriptor temp_desc; memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor)); le16_to_cpus(&temp_desc.bcdUSB); le16_to_cpus(&temp_desc.idVendor); le16_to_cpus(&temp_desc.idProduct); le16_to_cpus(&temp_desc.bcdDevice); len = sizeof(struct usb_device_descriptor) - pos; if (len > nbytes) len = nbytes; if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) { ret = -EFAULT; goto err; } *ppos += len; buf += len; nbytes -= len; ret += len; } pos = sizeof(struct usb_device_descriptor); for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { struct usb_config_descriptor *config = (struct usb_config_descriptor *)dev->rawdescriptors[i]; unsigned int length = le16_to_cpu(config->wTotalLength); if (*ppos < pos + length) { /* The descriptor may claim to be longer than it * really is. Here is the actual allocated length. 
*/ unsigned alloclen = le16_to_cpu(dev->config[i].desc.wTotalLength); len = length - (*ppos - pos); if (len > nbytes) len = nbytes; /* Simply don't write (skip over) unallocated parts */ if (alloclen > (*ppos - pos)) { alloclen -= (*ppos - pos); if (copy_to_user(buf, dev->rawdescriptors[i] + (*ppos - pos), min(len, alloclen))) { ret = -EFAULT; goto err; } } *ppos += len; buf += len; nbytes -= len; ret += len; } pos += length; } err: usb_unlock_device(dev); return ret; } /* * async list handling */ static struct async *alloc_async(unsigned int numisoframes) { struct async *as; as = kzalloc(sizeof(struct async), GFP_KERNEL); if (!as) return NULL; as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL); if (!as->urb) { kfree(as); return NULL; } return as; } static void free_async(struct async *as) { int i; put_pid(as->pid); if (as->cred) put_cred(as->cred); for (i = 0; i < as->urb->num_sgs; i++) { if (sg_page(&as->urb->sg[i])) kfree(sg_virt(&as->urb->sg[i])); } kfree(as->urb->sg); if (as->usbm == NULL) kfree(as->urb->transfer_buffer); else dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count); kfree(as->urb->setup_packet); usb_free_urb(as->urb); usbfs_decrease_memory_usage(as->mem_usage); kfree(as); } static void async_newpending(struct async *as) { struct usb_dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&as->asynclist, &ps->async_pending); spin_unlock_irqrestore(&ps->lock, flags); } static void async_removepending(struct async *as) { struct usb_dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_del_init(&as->asynclist); spin_unlock_irqrestore(&ps->lock, flags); } static struct async *async_getcompleted(struct usb_dev_state *ps) { unsigned long flags; struct async *as = NULL; spin_lock_irqsave(&ps->lock, flags); if (!list_empty(&ps->async_completed)) { as = list_entry(ps->async_completed.next, struct async, asynclist); list_del_init(&as->asynclist); } spin_unlock_irqrestore(&ps->lock, flags); return as; } static struct async *async_getpending(struct usb_dev_state *ps, void __user *userurb) { struct async *as; list_for_each_entry(as, &ps->async_pending, asynclist) if (as->userurb == userurb) { list_del_init(&as->asynclist); return as; } return NULL; } static void snoop_urb(struct usb_device *udev, void __user *userurb, int pipe, unsigned length, int timeout_or_status, enum snoop_when when, unsigned char *data, unsigned data_len) { static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; static const char *dirs[] = {"out", "in"}; int ep; const char *t, *d; if (!usbfs_snoop) return; ep = usb_pipeendpoint(pipe); t = types[usb_pipetype(pipe)]; d = dirs[!!usb_pipein(pipe)]; if (userurb) { /* Async */ if (when == SUBMIT) dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "length %u\n", userurb, ep, t, d, length); else dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "actual_length %u status %d\n", userurb, ep, t, d, length, timeout_or_status); } else { if (when == SUBMIT) dev_info(&udev->dev, "ep%d %s-%s, length %u, " "timeout %d\n", ep, t, d, length, timeout_or_status); else dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, " "status %d\n", ep, t, d, length, timeout_or_status); } data_len = min(data_len, usbfs_snoop_max); if (data && data_len > 0) { print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, data, data_len, 1); } } static void snoop_urb_data(struct urb *urb, unsigned len) { int i, size; len = min(len, usbfs_snoop_max); if (!usbfs_snoop || len == 0) return; if (urb->num_sgs == 0) { 
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, urb->transfer_buffer, len, 1); return; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, sg_virt(&urb->sg[i]), size, 1); len -= size; } } static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb) { unsigned i, len, size; if (urb->number_of_packets > 0) /* Isochronous */ len = urb->transfer_buffer_length; else /* Non-Isoc */ len = urb->actual_length; if (urb->num_sgs == 0) { if (copy_to_user(userbuffer, urb->transfer_buffer, len)) return -EFAULT; return 0; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; if (copy_to_user(userbuffer, sg_virt(&urb->sg[i]), size)) return -EFAULT; userbuffer += size; len -= size; } return 0; } #define AS_CONTINUATION 1 #define AS_UNLINK 2 static void cancel_bulk_urbs(struct usb_dev_state *ps, unsigned bulk_addr) __releases(ps->lock) __acquires(ps->lock) { struct urb *urb; struct async *as; /* Mark all the pending URBs that match bulk_addr, up to but not * including the first one without AS_CONTINUATION. If such an * URB is encountered then a new transfer has already started so * the endpoint doesn't need to be disabled; otherwise it does. */ list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_addr == bulk_addr) { if (as->bulk_status != AS_CONTINUATION) goto rescan; as->bulk_status = AS_UNLINK; as->bulk_addr = 0; } } ps->disabled_bulk_eps |= (1 << bulk_addr); /* Now carefully unlink all the marked pending URBs */ rescan: list_for_each_entry_reverse(as, &ps->async_pending, asynclist) { if (as->bulk_status == AS_UNLINK) { as->bulk_status = 0; /* Only once */ urb = as->urb; usb_get_urb(urb); spin_unlock(&ps->lock); /* Allow completions */ usb_unlink_urb(urb); usb_put_urb(urb); spin_lock(&ps->lock); goto rescan; } } } static void async_completed(struct urb *urb) { struct async *as = urb->context; struct usb_dev_state *ps = as->ps; struct pid *pid = NULL; const struct cred *cred = NULL; unsigned long flags; sigval_t addr; int signr, errno; spin_lock_irqsave(&ps->lock, flags); list_move_tail(&as->asynclist, &ps->async_completed); as->status = urb->status; signr = as->signr; if (signr) { errno = as->status; addr = as->userurb_sigval; pid = get_pid(as->pid); cred = get_cred(as->cred); } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, as->status, COMPLETE, NULL, 0); if (usb_urb_dir_in(urb)) snoop_urb_data(urb, urb->actual_length); if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && as->status != -ENOENT) cancel_bulk_urbs(ps, as->bulk_addr); wake_up(&ps->wait); spin_unlock_irqrestore(&ps->lock, flags); if (signr) { kill_pid_usb_asyncio(signr, errno, addr, pid, cred); put_pid(pid); put_cred(cred); } } static void destroy_async(struct usb_dev_state *ps, struct list_head *list) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); while (!list_empty(list)) { as = list_last_entry(list, struct async, asynclist); list_del_init(&as->asynclist); urb = as->urb; usb_get_urb(urb); /* drop the spinlock so the completion handler can run */ spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); spin_lock_irqsave(&ps->lock, flags); } spin_unlock_irqrestore(&ps->lock, flags); } static void destroy_async_on_interface(struct usb_dev_state *ps, unsigned int ifnum) { struct list_head *p, *q, hitlist; unsigned long 
flags; INIT_LIST_HEAD(&hitlist); spin_lock_irqsave(&ps->lock, flags); list_for_each_safe(p, q, &ps->async_pending) if (ifnum == list_entry(p, struct async, asynclist)->ifnum) list_move_tail(p, &hitlist); spin_unlock_irqrestore(&ps->lock, flags); destroy_async(ps, &hitlist); } static void destroy_all_async(struct usb_dev_state *ps) { destroy_async(ps, &ps->async_pending); } /* * interface claims are made only at the request of user level code, * which can also release them (explicitly or by closing files). * they're also undone when devices disconnect. */ static int driver_probe(struct usb_interface *intf, const struct usb_device_id *id) { return -ENODEV; } static void driver_disconnect(struct usb_interface *intf) { struct usb_dev_state *ps = usb_get_intfdata(intf); unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber; if (!ps) return; /* NOTE: this relies on usbcore having canceled and completed * all pending I/O requests; 2.6 does that. */ if (likely(ifnum < 8*sizeof(ps->ifclaimed))) clear_bit(ifnum, &ps->ifclaimed); else dev_warn(&intf->dev, "interface number %u out of range\n", ifnum); usb_set_intfdata(intf, NULL); /* force async requests to complete */ destroy_async_on_interface(ps, ifnum); } /* We don't care about suspend/resume of claimed interfaces */ static int driver_suspend(struct usb_interface *intf, pm_message_t msg) { return 0; } static int driver_resume(struct usb_interface *intf) { return 0; } #ifdef CONFIG_PM /* The following routines apply to the entire device, not interfaces */ void usbfs_notify_suspend(struct usb_device *udev) { /* We don't need to handle this */ } void usbfs_notify_resume(struct usb_device *udev) { struct usb_dev_state *ps; /* Protect against simultaneous remove or release */ mutex_lock(&usbfs_mutex); list_for_each_entry(ps, &udev->filelist, list) { WRITE_ONCE(ps->not_yet_resumed, 0); wake_up_all(&ps->wait_for_resume); } mutex_unlock(&usbfs_mutex); } #endif struct usb_driver usbfs_driver = { .name = "usbfs", .probe = driver_probe, .disconnect = driver_disconnect, .suspend = driver_suspend, .resume = driver_resume, .supports_autosuspend = 1, }; static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; /* already claimed */ if (test_bit(ifnum, &ps->ifclaimed)) return 0; if (ps->privileges_dropped && !test_bit(ifnum, &ps->interface_allowed_mask)) return -EACCES; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else { unsigned int old_suppress; /* suppress uevents while claiming interface */ old_suppress = dev_get_uevent_suppress(&intf->dev); dev_set_uevent_suppress(&intf->dev, 1); err = usb_driver_claim_interface(&usbfs_driver, intf, ps); dev_set_uevent_suppress(&intf->dev, old_suppress); } if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; } static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev; struct usb_interface *intf; int err; err = -EINVAL; if (ifnum >= 8*sizeof(ps->ifclaimed)) return err; dev = ps->dev; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) { unsigned int old_suppress; /* suppress uevents while releasing interface */ old_suppress = dev_get_uevent_suppress(&intf->dev); dev_set_uevent_suppress(&intf->dev, 1); usb_driver_release_interface(&usbfs_driver, intf); dev_set_uevent_suppress(&intf->dev, old_suppress); err = 0; } return err; } static int checkintf(struct usb_dev_state 
*ps, unsigned int ifnum) { if (ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; /* if not yet claimed, claim it for the driver */ dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim " "interface %u before use\n", task_pid_nr(current), current->comm, ifnum); return claimintf(ps, ifnum); } static int findintfep(struct usb_device *dev, unsigned int ep) { unsigned int i, j, e; struct usb_interface *intf; struct usb_host_interface *alts; struct usb_endpoint_descriptor *endpt; if (ep & ~(USB_DIR_IN|0xf)) return -EINVAL; if (!dev->actconfig) return -ESRCH; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { intf = dev->actconfig->interface[i]; for (j = 0; j < intf->num_altsetting; j++) { alts = &intf->altsetting[j]; for (e = 0; e < alts->desc.bNumEndpoints; e++) { endpt = &alts->endpoint[e].desc; if (endpt->bEndpointAddress == ep) return alts->desc.bInterfaceNumber; } } } return -ENOENT; } static int check_ctrlrecip(struct usb_dev_state *ps, unsigned int requesttype, unsigned int request, unsigned int index) { int ret = 0; struct usb_host_interface *alt_setting; if (ps->dev->state != USB_STATE_UNAUTHENTICATED && ps->dev->state != USB_STATE_ADDRESS && ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype)) return 0; /* * check for the special corner case 'get_device_id' in the printer * class specification, which we always want to allow as it is used * to query things like ink level, etc. */ if (requesttype == 0xa1 && request == 0) { alt_setting = usb_find_alt_setting(ps->dev->actconfig, index >> 8, index & 0xff); if (alt_setting && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER) return 0; } index &= 0xff; switch (requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: if ((index & ~USB_DIR_IN) == 0) return 0; ret = findintfep(ps->dev, index); if (ret < 0) { /* * Some not fully compliant Win apps seem to get * index wrong and have the endpoint number here * rather than the endpoint address (with the * correct direction). Win does let this through, * so we'll not reject it here but leave it to * the device to not break KVM. But we warn. 
*/ ret = findintfep(ps->dev, index ^ 0x80); if (ret >= 0) dev_info(&ps->dev->dev, "%s: process %i (%s) requesting ep %02x but needs %02x\n", __func__, task_pid_nr(current), current->comm, index, index ^ 0x80); } if (ret >= 0) ret = checkintf(ps, ret); break; case USB_RECIP_INTERFACE: ret = checkintf(ps, index); break; } return ret; } static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev, unsigned char ep) { if (ep & USB_ENDPOINT_DIR_MASK) return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK]; else return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK]; } static int parse_usbdevfs_streams(struct usb_dev_state *ps, struct usbdevfs_streams __user *streams, unsigned int *num_streams_ret, unsigned int *num_eps_ret, struct usb_host_endpoint ***eps_ret, struct usb_interface **intf_ret) { unsigned int i, num_streams, num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf = NULL; unsigned char ep; int ifnum, ret; if (get_user(num_streams, &streams->num_streams) || get_user(num_eps, &streams->num_eps)) return -EFAULT; if (num_eps < 1 || num_eps > USB_MAXENDPOINTS) return -EINVAL; /* The XHCI controller allows max 2 ^ 16 streams */ if (num_streams_ret && (num_streams < 2 || num_streams > 65536)) return -EINVAL; eps = kmalloc_array(num_eps, sizeof(*eps), GFP_KERNEL); if (!eps) return -ENOMEM; for (i = 0; i < num_eps; i++) { if (get_user(ep, &streams->eps[i])) { ret = -EFAULT; goto error; } eps[i] = ep_to_host_endpoint(ps->dev, ep); if (!eps[i]) { ret = -EINVAL; goto error; } /* usb_alloc/free_streams operate on an usb_interface */ ifnum = findintfep(ps->dev, ep); if (ifnum < 0) { ret = ifnum; goto error; } if (i == 0) { ret = checkintf(ps, ifnum); if (ret < 0) goto error; intf = usb_ifnum_to_if(ps->dev, ifnum); } else { /* Verify all eps belong to the same interface */ if (ifnum != intf->altsetting->desc.bInterfaceNumber) { ret = -EINVAL; goto error; } } } if (num_streams_ret) *num_streams_ret = num_streams; *num_eps_ret = num_eps; *eps_ret = eps; *intf_ret = intf; return 0; error: kfree(eps); return ret; } static struct usb_device *usbdev_lookup_by_devt(dev_t devt) { struct device *dev; dev = bus_find_device_by_devt(&usb_bus_type, devt); if (!dev) return NULL; return to_usb_device(dev); } /* * file operations */ static int usbdev_open(struct inode *inode, struct file *file) { struct usb_device *dev = NULL; struct usb_dev_state *ps; int ret; ret = -ENOMEM; ps = kzalloc(sizeof(struct usb_dev_state), GFP_KERNEL); if (!ps) goto out_free_ps; ret = -ENODEV; /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) dev = usbdev_lookup_by_devt(inode->i_rdev); if (!dev) goto out_free_ps; usb_lock_device(dev); if (dev->state == USB_STATE_NOTATTACHED) goto out_unlock_device; ret = usb_autoresume_device(dev); if (ret) goto out_unlock_device; ps->dev = dev; ps->file = file; ps->interface_allowed_mask = 0xFFFFFFFF; /* 32 bits */ spin_lock_init(&ps->lock); INIT_LIST_HEAD(&ps->list); INIT_LIST_HEAD(&ps->async_pending); INIT_LIST_HEAD(&ps->async_completed); INIT_LIST_HEAD(&ps->memory_list); init_waitqueue_head(&ps->wait); init_waitqueue_head(&ps->wait_for_resume); ps->disc_pid = get_pid(task_pid(current)); ps->cred = get_current_cred(); smp_wmb(); /* Can't race with resume; the device is already active */ list_add_tail(&ps->list, &dev->filelist); file->private_data = ps; usb_unlock_device(dev); snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), current->comm); return ret; out_unlock_device: usb_unlock_device(dev); usb_put_dev(dev); out_free_ps: kfree(ps); return ret; } 
static int usbdev_release(struct inode *inode, struct file *file) { struct usb_dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; unsigned int ifnum; struct async *as; usb_lock_device(dev); usb_hub_release_all_ports(dev, ps); /* Protect against simultaneous resume */ mutex_lock(&usbfs_mutex); list_del_init(&ps->list); mutex_unlock(&usbfs_mutex); for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); ifnum++) { if (test_bit(ifnum, &ps->ifclaimed)) releaseintf(ps, ifnum); } destroy_all_async(ps); if (!ps->suspend_allowed) usb_autosuspend_device(dev); usb_unlock_device(dev); usb_put_dev(dev); put_pid(ps->disc_pid); put_cred(ps->cred); as = async_getcompleted(ps); while (as) { free_async(as); as = async_getcompleted(ps); } kfree(ps); return 0; } static void usbfs_blocking_completion(struct urb *urb) { complete((struct completion *) urb->context); } /* * Much like usb_start_wait_urb, but returns status separately from * actual_length and uses a killable wait. */ static int usbfs_start_wait_urb(struct urb *urb, int timeout, unsigned int *actlen) { DECLARE_COMPLETION_ONSTACK(ctx); unsigned long expire; int rc; urb->context = &ctx; urb->complete = usbfs_blocking_completion; *actlen = 0; rc = usb_submit_urb(urb, GFP_KERNEL); if (unlikely(rc)) return rc; expire = (timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT); rc = wait_for_completion_killable_timeout(&ctx, expire); if (rc <= 0) { usb_kill_urb(urb); *actlen = urb->actual_length; if (urb->status != -ENOENT) ; /* Completed before it was killed */ else if (rc < 0) return -EINTR; else return -ETIMEDOUT; } *actlen = urb->actual_length; return urb->status; } static int do_proc_control(struct usb_dev_state *ps, struct usbdevfs_ctrltransfer *ctrl) { struct usb_device *dev = ps->dev; unsigned int tmo; unsigned char *tbuf; unsigned int wLength, actlen; int i, pipe, ret; struct urb *urb = NULL; struct usb_ctrlrequest *dr = NULL; ret = check_ctrlrecip(ps, ctrl->bRequestType, ctrl->bRequest, ctrl->wIndex); if (ret) return ret; wLength = ctrl->wLength; /* To suppress 64k PAGE_SIZE warning */ if (wLength > PAGE_SIZE) return -EINVAL; ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); if (ret) return ret; ret = -ENOMEM; tbuf = (unsigned char *)__get_free_page(GFP_KERNEL); if (!tbuf) goto done; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) goto done; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) goto done; dr->bRequestType = ctrl->bRequestType; dr->bRequest = ctrl->bRequest; dr->wValue = cpu_to_le16(ctrl->wValue); dr->wIndex = cpu_to_le16(ctrl->wIndex); dr->wLength = cpu_to_le16(ctrl->wLength); tmo = ctrl->timeout; snoop(&dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", ctrl->bRequestType, ctrl->bRequest, ctrl->wValue, ctrl->wIndex, ctrl->wLength); if ((ctrl->bRequestType & USB_DIR_IN) && wLength) { pipe = usb_rcvctrlpipe(dev, 0); usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf, wLength, NULL, NULL); snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); /* Linger a bit, prior to the next control message. 
*/ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen); if (!i && actlen) { if (copy_to_user(ctrl->data, tbuf, actlen)) { ret = -EFAULT; goto done; } } } else { if (wLength) { if (copy_from_user(tbuf, ctrl->data, wLength)) { ret = -EFAULT; goto done; } } pipe = usb_sndctrlpipe(dev, 0); usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf, wLength, NULL, NULL); snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, tbuf, wLength); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0); } if (i < 0 && i != -EPIPE) { dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL " "failed cmd %s rqt %u rq %u len %u ret %d\n", current->comm, ctrl->bRequestType, ctrl->bRequest, ctrl->wLength, i); } ret = (i < 0 ? i : actlen); done: kfree(dr); usb_free_urb(urb); free_page((unsigned long) tbuf); usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); return ret; } static int proc_control(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_ctrltransfer ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return do_proc_control(ps, &ctrl); } static int do_proc_bulk(struct usb_dev_state *ps, struct usbdevfs_bulktransfer *bulk) { struct usb_device *dev = ps->dev; unsigned int tmo, len1, len2, pipe; unsigned char *tbuf; int i, ret; struct urb *urb = NULL; struct usb_host_endpoint *ep; ret = findintfep(ps->dev, bulk->ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; len1 = bulk->len; if (len1 >= (INT_MAX - sizeof(struct urb))) return -EINVAL; if (bulk->ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(dev, bulk->ep & 0x7f); else pipe = usb_sndbulkpipe(dev, bulk->ep & 0x7f); ep = usb_pipe_endpoint(dev, pipe); if (!ep || !usb_endpoint_maxp(&ep->desc)) return -EINVAL; ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) return ret; /* * len1 can be almost arbitrarily large. Don't WARN if it's * too big, just fail the request. */ ret = -ENOMEM; tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN); if (!tbuf) goto done; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) goto done; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, dev, pipe, tbuf, len1, NULL, NULL, ep->desc.bInterval); } else { usb_fill_bulk_urb(urb, dev, pipe, tbuf, len1, NULL, NULL); } tmo = bulk->timeout; if (bulk->ep & 0x80) { snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &len2); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2); if (!i && len2) { if (copy_to_user(bulk->data, tbuf, len2)) { ret = -EFAULT; goto done; } } } else { if (len1) { if (copy_from_user(tbuf, bulk->data, len1)) { ret = -EFAULT; goto done; } } snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1); usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &len2); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0); } ret = (i < 0 ? 
i : len2); done: usb_free_urb(urb); kfree(tbuf); usbfs_decrease_memory_usage(len1 + sizeof(struct urb)); return ret; } static int proc_bulk(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_bulktransfer bulk; if (copy_from_user(&bulk, arg, sizeof(bulk))) return -EFAULT; return do_proc_bulk(ps, &bulk); } static void check_reset_of_active_ep(struct usb_device *udev, unsigned int epnum, char *ioctl_name) { struct usb_host_endpoint **eps; struct usb_host_endpoint *ep; eps = (epnum & USB_DIR_IN) ? udev->ep_in : udev->ep_out; ep = eps[epnum & 0x0f]; if (ep && !list_empty(&ep->urb_list)) dev_warn(&udev->dev, "Process %d (%s) called USBDEVFS_%s for active endpoint 0x%02x\n", task_pid_nr(current), current->comm, ioctl_name, epnum); } static int proc_resetep(struct usb_dev_state *ps, void __user *arg) { unsigned int ep; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; check_reset_of_active_ep(ps->dev, ep, "RESETEP"); usb_reset_endpoint(ps->dev, ep); return 0; } static int proc_clearhalt(struct usb_dev_state *ps, void __user *arg) { unsigned int ep; int pipe; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT"); if (ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f); else pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f); return usb_clear_halt(ps->dev, pipe); } static int proc_getdriver(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_getdriver gd; struct usb_interface *intf; int ret; if (copy_from_user(&gd, arg, sizeof(gd))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, gd.interface); if (!intf || !intf->dev.driver) ret = -ENODATA; else { strscpy(gd.driver, intf->dev.driver->name, sizeof(gd.driver)); ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0); } return ret; } static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_connectinfo ci; memset(&ci, 0, sizeof(ci)); ci.devnum = ps->dev->devnum; ci.slow = ps->dev->speed == USB_SPEED_LOW; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; } static int proc_conninfo_ex(struct usb_dev_state *ps, void __user *arg, size_t size) { struct usbdevfs_conninfo_ex ci; struct usb_device *udev = ps->dev; if (size < sizeof(ci.size)) return -EINVAL; memset(&ci, 0, sizeof(ci)); ci.size = sizeof(ci); ci.busnum = udev->bus->busnum; ci.devnum = udev->devnum; ci.speed = udev->speed; while (udev && udev->portnum != 0) { if (++ci.num_ports <= ARRAY_SIZE(ci.ports)) ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports] = udev->portnum; udev = udev->parent; } if (ci.num_ports < ARRAY_SIZE(ci.ports)) memmove(&ci.ports[0], &ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports], ci.num_ports); if (copy_to_user(arg, &ci, min(sizeof(ci), size))) return -EFAULT; return 0; } static int proc_resetdevice(struct usb_dev_state *ps) { struct usb_host_config *actconfig = ps->dev->actconfig; struct usb_interface *interface; int i, number; /* Don't allow a device reset if the process has dropped the * privilege to do such things and any of the interfaces are * currently claimed. 
*/ if (ps->privileges_dropped && actconfig) { for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { interface = actconfig->interface[i]; number = interface->cur_altsetting->desc.bInterfaceNumber; if (usb_interface_claimed(interface) && !test_bit(number, &ps->ifclaimed)) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s while '%s' resets device\n", number, interface->dev.driver->name, current->comm); return -EACCES; } } } return usb_reset_device(ps->dev); } static int proc_setintf(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_setinterface setintf; int ret; if (copy_from_user(&setintf, arg, sizeof(setintf))) return -EFAULT; ret = checkintf(ps, setintf.interface); if (ret) return ret; destroy_async_on_interface(ps, setintf.interface); return usb_set_interface(ps->dev, setintf.interface, setintf.altsetting); } static int proc_setconfig(struct usb_dev_state *ps, void __user *arg) { int u; int status = 0; struct usb_host_config *actconfig; if (get_user(u, (int __user *)arg)) return -EFAULT; actconfig = ps->dev->actconfig; /* Don't touch the device if any interfaces are claimed. * It could interfere with other drivers' operations, and if * an interface is claimed by usbfs it could easily deadlock. */ if (actconfig) { int i; for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { if (usb_interface_claimed(actconfig->interface[i])) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s " "while '%s' sets config #%d\n", actconfig->interface[i] ->cur_altsetting ->desc.bInterfaceNumber, actconfig->interface[i] ->dev.driver->name, current->comm, u); status = -EBUSY; break; } } } /* SET_CONFIGURATION is often abused as a "cheap" driver reset, * so avoid usb_set_configuration()'s kick to sysfs */ if (status == 0) { if (actconfig && actconfig->desc.bConfigurationValue == u) status = usb_reset_configuration(ps->dev); else status = usb_set_configuration(ps->dev, u); } return status; } static struct usb_memory * find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb) { struct usb_memory *usbm = NULL, *iter; unsigned long flags; unsigned long uurb_start = (unsigned long)uurb->buffer; spin_lock_irqsave(&ps->lock, flags); list_for_each_entry(iter, &ps->memory_list, memlist) { if (uurb_start >= iter->vm_start && uurb_start < iter->vm_start + iter->size) { if (uurb->buffer_length > iter->vm_start + iter->size - uurb_start) { usbm = ERR_PTR(-EINVAL); } else { usbm = iter; usbm->urb_use_count++; } break; } } spin_unlock_irqrestore(&ps->lock, flags); return usbm; } static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb, struct usbdevfs_iso_packet_desc __user *iso_frame_desc, void __user *arg, sigval_t userurb_sigval) { struct usbdevfs_iso_packet_desc *isopkt = NULL; struct usb_host_endpoint *ep; struct async *as = NULL; struct usb_ctrlrequest *dr = NULL; unsigned int u, totlen, isofrmlen; int i, ret, num_sgs = 0, ifnum = -1; int number_of_packets = 0; unsigned int stream_id = 0; void *buf; bool is_in; bool allow_short = false; bool allow_zero = false; unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT; /* USBDEVFS_URB_ISO_ASAP is a special case */ if (uurb->type == USBDEVFS_URB_TYPE_ISO) mask |= USBDEVFS_URB_ISO_ASAP; if (uurb->flags & ~mask) return -EINVAL; if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; if (!(uurb->type == 
USBDEVFS_URB_TYPE_CONTROL && (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) { ifnum = findintfep(ps->dev, uurb->endpoint); if (ifnum < 0) return ifnum; ret = checkintf(ps, ifnum); if (ret) return ret; } ep = ep_to_host_endpoint(ps->dev, uurb->endpoint); if (!ep) return -ENOENT; is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0; u = 0; switch (uurb->type) { case USBDEVFS_URB_TYPE_CONTROL: if (!usb_endpoint_xfer_control(&ep->desc)) return -EINVAL; /* min 8 byte setup packet */ if (uurb->buffer_length < 8) return -EINVAL; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!dr) return -ENOMEM; if (copy_from_user(dr, uurb->buffer, 8)) { ret = -EFAULT; goto error; } if (uurb->buffer_length < (le16_to_cpu(dr->wLength) + 8)) { ret = -EINVAL; goto error; } ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest, le16_to_cpu(dr->wIndex)); if (ret) goto error; uurb->buffer_length = le16_to_cpu(dr->wLength); uurb->buffer += 8; if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) { is_in = true; uurb->endpoint |= USB_DIR_IN; } else { is_in = false; uurb->endpoint &= ~USB_DIR_IN; } if (is_in) allow_short = true; snoop(&ps->dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", dr->bRequestType, dr->bRequest, __le16_to_cpu(dr->wValue), __le16_to_cpu(dr->wIndex), __le16_to_cpu(dr->wLength)); u = sizeof(struct usb_ctrlrequest); break; case USBDEVFS_URB_TYPE_BULK: if (!is_in) allow_zero = true; else allow_short = true; switch (usb_endpoint_type(&ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: return -EINVAL; case USB_ENDPOINT_XFER_INT: /* allow single-shot interrupt transfers */ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT; goto interrupt_urb; } num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE); if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize) num_sgs = 0; if (ep->streams) stream_id = uurb->stream_id; break; case USBDEVFS_URB_TYPE_INTERRUPT: if (!usb_endpoint_xfer_int(&ep->desc)) return -EINVAL; interrupt_urb: if (!is_in) allow_zero = true; else allow_short = true; break; case USBDEVFS_URB_TYPE_ISO: /* arbitrary limit */ if (uurb->number_of_packets < 1 || uurb->number_of_packets > 128) return -EINVAL; if (!usb_endpoint_xfer_isoc(&ep->desc)) return -EINVAL; number_of_packets = uurb->number_of_packets; isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) * number_of_packets; isopkt = memdup_user(iso_frame_desc, isofrmlen); if (IS_ERR(isopkt)) { ret = PTR_ERR(isopkt); isopkt = NULL; goto error; } for (totlen = u = 0; u < number_of_packets; u++) { /* * arbitrary limit need for USB 3.1 Gen2 * sizemax: 96 DPs at SSP, 96 * 1024 = 98304 */ if (isopkt[u].length > 98304) { ret = -EINVAL; goto error; } totlen += isopkt[u].length; } u *= sizeof(struct usb_iso_packet_descriptor); uurb->buffer_length = totlen; break; default: return -EINVAL; } if (uurb->buffer_length > 0 && !access_ok(uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } as = alloc_async(number_of_packets); if (!as) { ret = -ENOMEM; goto error; } as->usbm = find_memory_area(ps, uurb); if (IS_ERR(as->usbm)) { ret = PTR_ERR(as->usbm); as->usbm = NULL; goto error; } /* do not use SG buffers when memory mapped segments * are in use */ if (as->usbm) num_sgs = 0; u += sizeof(struct async) + sizeof(struct urb) + (as->usbm ? 
0 : uurb->buffer_length) + num_sgs * sizeof(struct scatterlist); ret = usbfs_increase_memory_usage(u); if (ret) goto error; as->mem_usage = u; if (num_sgs) { as->urb->sg = kmalloc_array(num_sgs, sizeof(struct scatterlist), GFP_KERNEL | __GFP_NOWARN); if (!as->urb->sg) { ret = -ENOMEM; goto error; } as->urb->num_sgs = num_sgs; sg_init_table(as->urb->sg, as->urb->num_sgs); totlen = uurb->buffer_length; for (i = 0; i < as->urb->num_sgs; i++) { u = (totlen > USB_SG_SIZE) ? USB_SG_SIZE : totlen; buf = kmalloc(u, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto error; } sg_set_buf(&as->urb->sg[i], buf, u); if (!is_in) { if (copy_from_user(buf, uurb->buffer, u)) { ret = -EFAULT; goto error; } uurb->buffer += u; } totlen -= u; } } else if (uurb->buffer_length > 0) { if (as->usbm) { unsigned long uurb_start = (unsigned long)uurb->buffer; as->urb->transfer_buffer = as->usbm->mem + (uurb_start - as->usbm->vm_start); } else { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL | __GFP_NOWARN); if (!as->urb->transfer_buffer) { ret = -ENOMEM; goto error; } if (!is_in) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } } else if (uurb->type == USBDEVFS_URB_TYPE_ISO) { /* * Isochronous input data may end up being * discontiguous if some of the packets are * short. Clear the buffer so that the gaps * don't leak kernel data to userspace. */ memset(as->urb->transfer_buffer, 0, uurb->buffer_length); } } } as->urb->dev = ps->dev; as->urb->pipe = (uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | (uurb->endpoint & USB_DIR_IN); /* This tedious sequence is necessary because the URB_* flags * are internal to the kernel and subject to change, whereas * the USBDEVFS_URB_* flags are a user API and must not be changed. */ u = (is_in ? 
URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) u |= URB_SHORT_NOT_OK; if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) u |= URB_ZERO_PACKET; if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) u |= URB_NO_INTERRUPT; as->urb->transfer_flags = u; if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); as->urb->transfer_buffer_length = uurb->buffer_length; as->urb->setup_packet = (unsigned char *)dr; dr = NULL; as->urb->start_frame = uurb->start_frame; as->urb->number_of_packets = number_of_packets; as->urb->stream_id = stream_id; if (ep->desc.bInterval) { if (uurb->type == USBDEVFS_URB_TYPE_ISO || ps->dev->speed == USB_SPEED_HIGH || ps->dev->speed >= USB_SPEED_SUPER) as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); else as->urb->interval = ep->desc.bInterval; } as->urb->context = as; as->urb->complete = async_completed; for (totlen = u = 0; u < number_of_packets; u++) { as->urb->iso_frame_desc[u].offset = totlen; as->urb->iso_frame_desc[u].length = isopkt[u].length; totlen += isopkt[u].length; } kfree(isopkt); isopkt = NULL; as->ps = ps; as->userurb = arg; as->userurb_sigval = userurb_sigval; if (as->usbm) { unsigned long uurb_start = (unsigned long)uurb->buffer; as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; as->urb->transfer_dma = as->usbm->dma_handle + (uurb_start - as->usbm->vm_start); } else if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; as->signr = uurb->signr; as->ifnum = ifnum; as->pid = get_pid(task_pid(current)); as->cred = get_current_cred(); snoop_urb(ps->dev, as->userurb, as->urb->pipe, as->urb->transfer_buffer_length, 0, SUBMIT, NULL, 0); if (!is_in) snoop_urb_data(as->urb, as->urb->transfer_buffer_length); async_newpending(as); if (usb_endpoint_xfer_bulk(&ep->desc)) { spin_lock_irq(&ps->lock); /* Not exactly the endpoint address; the direction bit is * shifted to the 0x10 position so that the value will be * between 0 and 31. */ as->bulk_addr = usb_endpoint_num(&ep->desc) | ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) >> 3); /* If this bulk URB is the start of a new transfer, re-enable * the endpoint. Otherwise mark it as a continuation URB. */ if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION) as->bulk_status = AS_CONTINUATION; else ps->disabled_bulk_eps &= ~(1 << as->bulk_addr); /* Don't accept continuation URBs if the endpoint is * disabled because of an earlier error. 
*/ if (ps->disabled_bulk_eps & (1 << as->bulk_addr)) ret = -EREMOTEIO; else ret = usb_submit_urb(as->urb, GFP_ATOMIC); spin_unlock_irq(&ps->lock); } else { ret = usb_submit_urb(as->urb, GFP_KERNEL); } if (ret) { dev_printk(KERN_DEBUG, &ps->dev->dev, "usbfs: usb_submit_urb returned %d\n", ret); snoop_urb(ps->dev, as->userurb, as->urb->pipe, 0, ret, COMPLETE, NULL, 0); async_removepending(as); goto error; } return 0; error: kfree(isopkt); kfree(dr); if (as) free_async(as); return ret; } static int proc_submiturb(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; sigval_t userurb_sigval; if (copy_from_user(&uurb, arg, sizeof(uurb))) return -EFAULT; memset(&userurb_sigval, 0, sizeof(userurb_sigval)); userurb_sigval.sival_ptr = arg; return proc_do_submiturb(ps, &uurb, (((struct usbdevfs_urb __user *)arg)->iso_frame_desc), arg, userurb_sigval); } static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); as = async_getpending(ps, arg); if (!as) { spin_unlock_irqrestore(&ps->lock, flags); return -EINVAL; } urb = as->urb; usb_get_urb(urb); spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); return 0; } static void compute_isochronous_actual_length(struct urb *urb) { unsigned int i; if (urb->number_of_packets > 0) { urb->actual_length = 0; for (i = 0; i < urb->number_of_packets; i++) urb->actual_length += urb->iso_frame_desc[i].actual_length; } } static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; } if (put_user(as->status, &userurb->status)) goto err_out; if (put_user(urb->actual_length, &userurb->actual_length)) goto err_out; if (put_user(urb->error_count, &userurb->error_count)) goto err_out; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) goto err_out; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) goto err_out; } } if (put_user(addr, (void __user * __user *)arg)) return -EFAULT; return 0; err_out: return -EFAULT; } static struct async *reap_as(struct usb_dev_state *ps) { DECLARE_WAITQUEUE(wait, current); struct async *as = NULL; struct usb_device *dev = ps->dev; add_wait_queue(&ps->wait, &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); as = async_getcompleted(ps); if (as || !connected(ps)) break; if (signal_pending(current)) break; usb_unlock_device(dev); schedule(); usb_lock_device(dev); } remove_wait_queue(&ps->wait, &wait); set_current_state(TASK_RUNNING); return as; } static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval; snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -ENODEV; } static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); if (as) { snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); } else { retval = 
(connected(ps) ? -EAGAIN : -ENODEV); } return retval; } #ifdef CONFIG_COMPAT static int proc_control_compat(struct usb_dev_state *ps, struct usbdevfs_ctrltransfer32 __user *p32) { struct usbdevfs_ctrltransfer ctrl; u32 udata; if (copy_from_user(&ctrl, p32, sizeof(*p32) - sizeof(compat_caddr_t)) || get_user(udata, &p32->data)) return -EFAULT; ctrl.data = compat_ptr(udata); return do_proc_control(ps, &ctrl); } static int proc_bulk_compat(struct usb_dev_state *ps, struct usbdevfs_bulktransfer32 __user *p32) { struct usbdevfs_bulktransfer bulk; compat_caddr_t addr; if (get_user(bulk.ep, &p32->ep) || get_user(bulk.len, &p32->len) || get_user(bulk.timeout, &p32->timeout) || get_user(addr, &p32->data)) return -EFAULT; bulk.data = compat_ptr(addr); return do_proc_bulk(ps, &bulk); } static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal32 ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext.sival_int = ds.context; return 0; } static int get_urb32(struct usbdevfs_urb *kurb, struct usbdevfs_urb32 __user *uurb) { struct usbdevfs_urb32 urb32; if (copy_from_user(&urb32, uurb, sizeof(*uurb))) return -EFAULT; kurb->type = urb32.type; kurb->endpoint = urb32.endpoint; kurb->status = urb32.status; kurb->flags = urb32.flags; kurb->buffer = compat_ptr(urb32.buffer); kurb->buffer_length = urb32.buffer_length; kurb->actual_length = urb32.actual_length; kurb->start_frame = urb32.start_frame; kurb->number_of_packets = urb32.number_of_packets; kurb->error_count = urb32.error_count; kurb->signr = urb32.signr; kurb->usercontext = compat_ptr(urb32.usercontext); return 0; } static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; sigval_t userurb_sigval; if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg)) return -EFAULT; memset(&userurb_sigval, 0, sizeof(userurb_sigval)); userurb_sigval.sival_int = ptr_to_compat(arg); return proc_do_submiturb(ps, &uurb, ((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc, arg, userurb_sigval); } static int processcompl_compat(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb32 __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; } if (put_user(as->status, &userurb->status)) return -EFAULT; if (put_user(urb->actual_length, &userurb->actual_length)) return -EFAULT; if (put_user(urb->error_count, &userurb->error_count)) return -EFAULT; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) return -EFAULT; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) return -EFAULT; } } if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) return -EFAULT; return 0; } static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval; snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -ENODEV; } static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); if (as) { 
snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); } else { retval = (connected(ps) ? -EAGAIN : -ENODEV); } return retval; } #endif static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext.sival_ptr = ds.context; return 0; } static int proc_claiminterface(struct usb_dev_state *ps, void __user *arg) { unsigned int ifnum; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; return claimintf(ps, ifnum); } static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg) { unsigned int ifnum; int ret; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; ret = releaseintf(ps, ifnum); if (ret < 0) return ret; destroy_async_on_interface(ps, ifnum); return 0; } static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl) { int size; void *buf = NULL; int retval = 0; struct usb_interface *intf = NULL; struct usb_driver *driver = NULL; if (ps->privileges_dropped) return -EACCES; if (!connected(ps)) return -ENODEV; /* alloc buffer */ size = _IOC_SIZE(ctl->ioctl_code); if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) { if (copy_from_user(buf, ctl->data, size)) { kfree(buf); return -EFAULT; } } else { memset(buf, 0, size); } } if (ps->dev->state != USB_STATE_CONFIGURED) retval = -EHOSTUNREACH; else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno))) retval = -EINVAL; else switch (ctl->ioctl_code) { /* disconnect kernel driver from interface */ case USBDEVFS_DISCONNECT: if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } else retval = -ENODATA; break; /* let kernel drivers try to (re)bind to the interface */ case USBDEVFS_CONNECT: if (!intf->dev.driver) retval = device_attach(&intf->dev); else retval = -EBUSY; break; /* talk directly to the interface's driver */ default: if (intf->dev.driver) driver = to_usb_driver(intf->dev.driver); if (driver == NULL || driver->unlocked_ioctl == NULL) { retval = -ENOTTY; } else { retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); if (retval == -ENOIOCTLCMD) retval = -ENOTTY; } } /* cleanup and return */ if (retval >= 0 && (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0 && size > 0 && copy_to_user(ctl->data, buf, size) != 0) retval = -EFAULT; kfree(buf); return retval; } static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_ioctl ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return proc_ioctl(ps, &ctrl); } #ifdef CONFIG_COMPAT static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg) { struct usbdevfs_ioctl32 ioc32; struct usbdevfs_ioctl ctrl; if (copy_from_user(&ioc32, compat_ptr(arg), sizeof(ioc32))) return -EFAULT; ctrl.ifno = ioc32.ifno; ctrl.ioctl_code = ioc32.ioctl_code; ctrl.data = compat_ptr(ioc32.data); return proc_ioctl(ps, &ctrl); } #endif static int proc_claim_port(struct usb_dev_state *ps, void __user *arg) { unsigned portnum; int rc; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; rc = usb_hub_claim_port(ps->dev, portnum, ps); if (rc == 0) snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n", portnum, task_pid_nr(current), current->comm); return rc; } static int proc_release_port(struct 
usb_dev_state *ps, void __user *arg) { unsigned portnum; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; return usb_hub_release_port(ps->dev, portnum, ps); } static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg) { __u32 caps; caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM | USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP | USBDEVFS_CAP_DROP_PRIVILEGES | USBDEVFS_CAP_CONNINFO_EX | MAYBE_CAP_SUSPEND; if (!ps->dev->bus->no_stop_on_short) caps |= USBDEVFS_CAP_BULK_CONTINUATION; if (ps->dev->bus->sg_tablesize) caps |= USBDEVFS_CAP_BULK_SCATTER_GATHER; if (put_user(caps, (__u32 __user *)arg)) return -EFAULT; return 0; } static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg) { struct usbdevfs_disconnect_claim dc; struct usb_interface *intf; if (copy_from_user(&dc, arg, sizeof(dc))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, dc.interface); if (!intf) return -EINVAL; if (intf->dev.driver) { struct usb_driver *driver = to_usb_driver(intf->dev.driver); if (ps->privileges_dropped) return -EACCES; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) != 0) return -EBUSY; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) == 0) return -EBUSY; dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } return claimintf(ps, dc.interface); } static int proc_alloc_streams(struct usb_dev_state *ps, void __user *arg) { unsigned num_streams, num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf; int r; r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps, &eps, &intf); if (r) return r; destroy_async_on_interface(ps, intf->altsetting[0].desc.bInterfaceNumber); r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL); kfree(eps); return r; } static int proc_free_streams(struct usb_dev_state *ps, void __user *arg) { unsigned num_eps; struct usb_host_endpoint **eps; struct usb_interface *intf; int r; r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf); if (r) return r; destroy_async_on_interface(ps, intf->altsetting[0].desc.bInterfaceNumber); r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL); kfree(eps); return r; } static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg) { u32 data; if (copy_from_user(&data, arg, sizeof(data))) return -EFAULT; /* This is a one way operation. Once privileges are * dropped, you cannot regain them. You may however reissue * this ioctl to shrink the allowed interfaces mask. 
*/ ps->interface_allowed_mask &= data; ps->privileges_dropped = true; return 0; } static int proc_forbid_suspend(struct usb_dev_state *ps) { int ret = 0; if (ps->suspend_allowed) { ret = usb_autoresume_device(ps->dev); if (ret == 0) ps->suspend_allowed = false; else if (ret != -ENODEV) ret = -EIO; } return ret; } static int proc_allow_suspend(struct usb_dev_state *ps) { if (!connected(ps)) return -ENODEV; WRITE_ONCE(ps->not_yet_resumed, 1); if (!ps->suspend_allowed) { usb_autosuspend_device(ps->dev); ps->suspend_allowed = true; } return 0; } static int proc_wait_for_resume(struct usb_dev_state *ps) { int ret; usb_unlock_device(ps->dev); ret = wait_event_interruptible(ps->wait_for_resume, READ_ONCE(ps->not_yet_resumed) == 0); usb_lock_device(ps->dev); if (ret != 0) return -EINTR; return proc_forbid_suspend(ps); } /* * NOTE: All requests here that have interface numbers as parameters * are assuming that somehow the configuration has been prevented from * changing. But there's no mechanism to ensure that... */ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, void __user *p) { struct usb_dev_state *ps = file->private_data; struct inode *inode = file_inode(file); struct usb_device *dev = ps->dev; int ret = -ENOTTY; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; usb_lock_device(dev); /* Reap operations are allowed even after disconnection */ switch (cmd) { case USBDEVFS_REAPURB: snoop(&dev->dev, "%s: REAPURB\n", __func__); ret = proc_reapurb(ps, p); goto done; case USBDEVFS_REAPURBNDELAY: snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__); ret = proc_reapurbnonblock(ps, p); goto done; #ifdef CONFIG_COMPAT case USBDEVFS_REAPURB32: snoop(&dev->dev, "%s: REAPURB32\n", __func__); ret = proc_reapurb_compat(ps, p); goto done; case USBDEVFS_REAPURBNDELAY32: snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__); ret = proc_reapurbnonblock_compat(ps, p); goto done; #endif } if (!connected(ps)) { usb_unlock_device(dev); return -ENODEV; } switch (cmd) { case USBDEVFS_CONTROL: snoop(&dev->dev, "%s: CONTROL\n", __func__); ret = proc_control(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_BULK: snoop(&dev->dev, "%s: BULK\n", __func__); ret = proc_bulk(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_RESETEP: snoop(&dev->dev, "%s: RESETEP\n", __func__); ret = proc_resetep(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_RESET: snoop(&dev->dev, "%s: RESET\n", __func__); ret = proc_resetdevice(ps); break; case USBDEVFS_CLEAR_HALT: snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__); ret = proc_clearhalt(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_GETDRIVER: snoop(&dev->dev, "%s: GETDRIVER\n", __func__); ret = proc_getdriver(ps, p); break; case USBDEVFS_CONNECTINFO: snoop(&dev->dev, "%s: CONNECTINFO\n", __func__); ret = proc_connectinfo(ps, p); break; case USBDEVFS_SETINTERFACE: snoop(&dev->dev, "%s: SETINTERFACE\n", __func__); ret = proc_setintf(ps, p); break; case USBDEVFS_SETCONFIGURATION: snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__); ret = proc_setconfig(ps, p); break; case USBDEVFS_SUBMITURB: snoop(&dev->dev, "%s: SUBMITURB\n", __func__); ret = proc_submiturb(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; #ifdef CONFIG_COMPAT case USBDEVFS_CONTROL32: snoop(&dev->dev, "%s: CONTROL32\n", __func__); ret = 
proc_control_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_BULK32: snoop(&dev->dev, "%s: BULK32\n", __func__); ret = proc_bulk_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_DISCSIGNAL32: snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__); ret = proc_disconnectsignal_compat(ps, p); break; case USBDEVFS_SUBMITURB32: snoop(&dev->dev, "%s: SUBMITURB32\n", __func__); ret = proc_submiturb_compat(ps, p); if (ret >= 0) inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); break; case USBDEVFS_IOCTL32: snoop(&dev->dev, "%s: IOCTL32\n", __func__); ret = proc_ioctl_compat(ps, ptr_to_compat(p)); break; #endif case USBDEVFS_DISCARDURB: snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p); ret = proc_unlinkurb(ps, p); break; case USBDEVFS_DISCSIGNAL: snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__); ret = proc_disconnectsignal(ps, p); break; case USBDEVFS_CLAIMINTERFACE: snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__); ret = proc_claiminterface(ps, p); break; case USBDEVFS_RELEASEINTERFACE: snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__); ret = proc_releaseinterface(ps, p); break; case USBDEVFS_IOCTL: snoop(&dev->dev, "%s: IOCTL\n", __func__); ret = proc_ioctl_default(ps, p); break; case USBDEVFS_CLAIM_PORT: snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__); ret = proc_claim_port(ps, p); break; case USBDEVFS_RELEASE_PORT: snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__); ret = proc_release_port(ps, p); break; case USBDEVFS_GET_CAPABILITIES: ret = proc_get_capabilities(ps, p); break; case USBDEVFS_DISCONNECT_CLAIM: ret = proc_disconnect_claim(ps, p); break; case USBDEVFS_ALLOC_STREAMS: ret = proc_alloc_streams(ps, p); break; case USBDEVFS_FREE_STREAMS: ret = proc_free_streams(ps, p); break; case USBDEVFS_DROP_PRIVILEGES: ret = proc_drop_privileges(ps, p); break; case USBDEVFS_GET_SPEED: ret = ps->dev->speed; break; case USBDEVFS_FORBID_SUSPEND: ret = proc_forbid_suspend(ps); break; case USBDEVFS_ALLOW_SUSPEND: ret = proc_allow_suspend(ps); break; case USBDEVFS_WAIT_FOR_RESUME: ret = proc_wait_for_resume(ps); break; } /* Handle variable-length commands */ switch (cmd & ~IOCSIZE_MASK) { case USBDEVFS_CONNINFO_EX(0): ret = proc_conninfo_ex(ps, p, _IOC_SIZE(cmd)); break; } done: usb_unlock_device(dev); if (ret >= 0) inode_set_atime_to_ts(inode, current_time(inode)); return ret; } static long usbdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); return ret; } /* No kernel lock - fine */ static __poll_t usbdev_poll(struct file *file, struct poll_table_struct *wait) { struct usb_dev_state *ps = file->private_data; __poll_t mask = 0; poll_wait(file, &ps->wait, wait); if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= EPOLLOUT | EPOLLWRNORM; if (!connected(ps)) mask |= EPOLLHUP; if (list_empty(&ps->list)) mask |= EPOLLERR; return mask; } const struct file_operations usbdev_file_operations = { .owner = THIS_MODULE, .llseek = no_seek_end_llseek, .read = usbdev_read, .poll = usbdev_poll, .unlocked_ioctl = usbdev_ioctl, .compat_ioctl = compat_ptr_ioctl, .mmap = usbdev_mmap, .open = usbdev_open, .release = usbdev_release, }; static void usbdev_remove(struct usb_device *udev) { struct usb_dev_state *ps; /* Protect against simultaneous resume */ mutex_lock(&usbfs_mutex); while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct usb_dev_state, list); 
destroy_all_async(ps); wake_up_all(&ps->wait); WRITE_ONCE(ps->not_yet_resumed, 0); wake_up_all(&ps->wait_for_resume); list_del_init(&ps->list); if (ps->discsignr) kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext, ps->disc_pid, ps->cred); } mutex_unlock(&usbfs_mutex); } static int usbdev_notify(struct notifier_block *self, unsigned long action, void *dev) { switch (action) { case USB_DEVICE_ADD: break; case USB_DEVICE_REMOVE: usbdev_remove(dev); break; } return NOTIFY_OK; } static struct notifier_block usbdev_nb = { .notifier_call = usbdev_notify, }; static struct cdev usb_device_cdev; int __init usb_devio_init(void) { int retval; retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX, "usb_device"); if (retval) { printk(KERN_ERR "Unable to register minors for usb_device\n"); goto out; } cdev_init(&usb_device_cdev, &usbdev_file_operations); retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX); if (retval) { printk(KERN_ERR "Unable to get usb_device major %d\n", USB_DEVICE_MAJOR); goto error_cdev; } usb_register_notify(&usbdev_nb); out: return retval; error_cdev: unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); goto out; } void usb_devio_cleanup(void) { usb_unregister_notify(&usbdev_nb); cdev_del(&usb_device_cdev); unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); }
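/*
 * Illustrative userspace sketch (added note, not part of the usbfs code
 * above): one way a program might exercise the asynchronous URB path that
 * proc_submiturb() and proc_reapurb() implement.  The device node, interface
 * number and bulk IN endpoint address are hypothetical placeholders; a real
 * device would supply its own values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

#define EXAMPLE_DEVNODE	"/dev/bus/usb/001/002"	/* hypothetical */
#define EXAMPLE_EP_IN	0x81			/* hypothetical bulk IN endpoint */

static int example_async_bulk_read(void)
{
	static unsigned char data[512];
	struct usbdevfs_urb urb, *reaped;
	unsigned int ifnum = 0;			/* hypothetical interface */
	int fd;

	fd = open(EXAMPLE_DEVNODE, O_RDWR);
	if (fd < 0)
		return -1;
	/* the endpoint must belong to a claimed interface (checkintf()) */
	if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum) < 0)
		goto fail;

	memset(&urb, 0, sizeof(urb));
	urb.type = USBDEVFS_URB_TYPE_BULK;
	urb.endpoint = EXAMPLE_EP_IN;
	urb.buffer = data;
	urb.buffer_length = sizeof(data);

	/* queued by proc_submiturb(); completion is reported asynchronously */
	if (ioctl(fd, USBDEVFS_SUBMITURB, &urb) < 0)
		goto fail;
	/* proc_reapurb() blocks, then hands back the pointer we submitted */
	if (ioctl(fd, USBDEVFS_REAPURB, &reaped) < 0)
		goto fail;
	/* reaped == &urb; status and actual_length were filled in by processcompl() */
	printf("status %d, %d bytes\n", reaped->status, reaped->actual_length);
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}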
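/*
 * Worked example for the bulk_addr encoding in proc_do_submiturb() above
 * (added note, restating the existing arithmetic rather than adding driver
 * logic): usb_endpoint_num() keeps the low four address bits and the 0x80
 * direction bit is shifted down to the 0x10 position, so bulk IN endpoint
 * 0x81 maps to 0x01 | 0x10 = 17 while bulk OUT endpoint 0x02 stays 2.  Every
 * result lies in 0..31, which is why ps->disabled_bulk_eps can track all bulk
 * endpoints in a single 32-bit mask.
 */
static inline unsigned int example_bulk_addr(unsigned char epaddr)
{
	/* same computation as the as->bulk_addr assignment above */
	return (epaddr & 0x0f) | ((epaddr & 0x80) >> 3);
}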
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Edirol UA-101/UA-1000 driver
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "../usbaudio.h"
#include "../midi.h"

MODULE_DESCRIPTION("Edirol UA-101/1000 driver");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL v2");

/*
 * Should not be lower than the minimum scheduling delay of the host
 * controller.  Some Intel controllers need more than one frame; as long as
 * that driver doesn't tell us about this, use 1.5 frames just to be sure.
 */
#define MIN_QUEUE_LENGTH	12
/* Somewhat random. */
#define MAX_QUEUE_LENGTH	30
/*
 * This magic value optimizes memory usage efficiency for the UA-101's packet
 * sizes at all sample rates, taking into account the stupid cache pool sizes
 * that usb_alloc_coherent() uses.
*/ #define DEFAULT_QUEUE_LENGTH 21 #define MAX_PACKET_SIZE 672 /* hardware specific */ #define MAX_MEMORY_BUFFERS DIV_ROUND_UP(MAX_QUEUE_LENGTH, \ PAGE_SIZE / MAX_PACKET_SIZE) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static unsigned int queue_length = 21; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "card index"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "enable card"); module_param(queue_length, uint, 0644); MODULE_PARM_DESC(queue_length, "USB queue length in microframes, " __stringify(MIN_QUEUE_LENGTH)"-"__stringify(MAX_QUEUE_LENGTH)); enum { INTF_PLAYBACK, INTF_CAPTURE, INTF_MIDI, INTF_COUNT }; /* bits in struct ua101::states */ enum { USB_CAPTURE_RUNNING, USB_PLAYBACK_RUNNING, ALSA_CAPTURE_OPEN, ALSA_PLAYBACK_OPEN, ALSA_CAPTURE_RUNNING, ALSA_PLAYBACK_RUNNING, CAPTURE_URB_COMPLETED, PLAYBACK_URB_COMPLETED, DISCONNECTED, }; struct ua101 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf[INTF_COUNT]; int card_index; struct snd_pcm *pcm; struct list_head midi_list; u64 format_bit; unsigned int rate; unsigned int packets_per_second; spinlock_t lock; struct mutex mutex; unsigned long states; /* FIFO to synchronize playback rate to capture rate */ unsigned int rate_feedback_start; unsigned int rate_feedback_count; u8 rate_feedback[MAX_QUEUE_LENGTH]; struct list_head ready_playback_urbs; struct work_struct playback_work; wait_queue_head_t alsa_capture_wait; wait_queue_head_t rate_feedback_wait; wait_queue_head_t alsa_playback_wait; struct ua101_stream { struct snd_pcm_substream *substream; unsigned int usb_pipe; unsigned int channels; unsigned int frame_bytes; unsigned int max_packet_bytes; unsigned int period_pos; unsigned int buffer_pos; unsigned int queue_length; struct ua101_urb { struct urb urb; struct usb_iso_packet_descriptor iso_frame_desc[1]; struct list_head ready_list; } *urbs[MAX_QUEUE_LENGTH]; struct { unsigned int size; void *addr; dma_addr_t dma; } buffers[MAX_MEMORY_BUFFERS]; } capture, playback; }; static DEFINE_MUTEX(devices_mutex); static unsigned int devices_used; static struct usb_driver ua101_driver; static void abort_alsa_playback(struct ua101 *ua); static void abort_alsa_capture(struct ua101 *ua); static const char *usb_error_string(int err) { switch (err) { case -ENODEV: return "no device"; case -ENOENT: return "endpoint not enabled"; case -EPIPE: return "endpoint stalled"; case -ENOSPC: return "not enough bandwidth"; case -ESHUTDOWN: return "device disabled"; case -EHOSTUNREACH: return "device suspended"; case -EINVAL: case -EAGAIN: case -EFBIG: case -EMSGSIZE: return "internal error"; default: return "unknown error"; } } static void abort_usb_capture(struct ua101 *ua) { if (test_and_clear_bit(USB_CAPTURE_RUNNING, &ua->states)) { wake_up(&ua->alsa_capture_wait); wake_up(&ua->rate_feedback_wait); } } static void abort_usb_playback(struct ua101 *ua) { if (test_and_clear_bit(USB_PLAYBACK_RUNNING, &ua->states)) wake_up(&ua->alsa_playback_wait); } static void playback_urb_complete(struct urb *usb_urb) { struct ua101_urb *urb = (struct ua101_urb *)usb_urb; struct ua101 *ua = urb->urb.context; unsigned long flags; if (unlikely(urb->urb.status == -ENOENT || /* unlinked */ urb->urb.status == -ENODEV || /* device removed */ urb->urb.status == -ECONNRESET || /* unlinked */ urb->urb.status == -ESHUTDOWN)) { 
/* device disabled */ abort_usb_playback(ua); abort_alsa_playback(ua); return; } if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) { /* append URB to FIFO */ spin_lock_irqsave(&ua->lock, flags); list_add_tail(&urb->ready_list, &ua->ready_playback_urbs); if (ua->rate_feedback_count > 0) queue_work(system_highpri_wq, &ua->playback_work); ua->playback.substream->runtime->delay -= urb->urb.iso_frame_desc[0].length / ua->playback.frame_bytes; spin_unlock_irqrestore(&ua->lock, flags); } } static void first_playback_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; urb->complete = playback_urb_complete; playback_urb_complete(urb); set_bit(PLAYBACK_URB_COMPLETED, &ua->states); wake_up(&ua->alsa_playback_wait); } /* copy data from the ALSA ring buffer into the URB buffer */ static bool copy_playback_data(struct ua101_stream *stream, struct urb *urb, unsigned int frames) { struct snd_pcm_runtime *runtime; unsigned int frame_bytes, frames1; const u8 *source; runtime = stream->substream->runtime; frame_bytes = stream->frame_bytes; source = runtime->dma_area + stream->buffer_pos * frame_bytes; if (stream->buffer_pos + frames <= runtime->buffer_size) { memcpy(urb->transfer_buffer, source, frames * frame_bytes); } else { /* wrap around at end of ring buffer */ frames1 = runtime->buffer_size - stream->buffer_pos; memcpy(urb->transfer_buffer, source, frames1 * frame_bytes); memcpy(urb->transfer_buffer + frames1 * frame_bytes, runtime->dma_area, (frames - frames1) * frame_bytes); } stream->buffer_pos += frames; if (stream->buffer_pos >= runtime->buffer_size) stream->buffer_pos -= runtime->buffer_size; stream->period_pos += frames; if (stream->period_pos >= runtime->period_size) { stream->period_pos -= runtime->period_size; return true; } return false; } static inline void add_with_wraparound(struct ua101 *ua, unsigned int *value, unsigned int add) { *value += add; if (*value >= ua->playback.queue_length) *value -= ua->playback.queue_length; } static void playback_work(struct work_struct *work) { struct ua101 *ua = container_of(work, struct ua101, playback_work); unsigned long flags; unsigned int frames; struct ua101_urb *urb; bool do_period_elapsed = false; int err; if (unlikely(!test_bit(USB_PLAYBACK_RUNNING, &ua->states))) return; /* * Synchronizing the playback rate to the capture rate is done by using * the same sequence of packet sizes for both streams. * Submitting a playback URB therefore requires both a ready URB and * the size of the corresponding capture packet, i.e., both playback * and capture URBs must have been completed. Since the USB core does * not guarantee that playback and capture complete callbacks are * called alternately, we use two FIFOs for packet sizes and read URBs; * submitting playback URBs is possible as long as both FIFOs are * nonempty. 
*/ spin_lock_irqsave(&ua->lock, flags); while (ua->rate_feedback_count > 0 && !list_empty(&ua->ready_playback_urbs)) { /* take packet size out of FIFO */ frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; /* take URB out of FIFO */ urb = list_first_entry(&ua->ready_playback_urbs, struct ua101_urb, ready_list); list_del(&urb->ready_list); /* fill packet with data or silence */ urb->urb.iso_frame_desc[0].length = frames * ua->playback.frame_bytes; if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) do_period_elapsed |= copy_playback_data(&ua->playback, &urb->urb, frames); else memset(urb->urb.transfer_buffer, 0, urb->urb.iso_frame_desc[0].length); /* and off you go ... */ err = usb_submit_urb(&urb->urb, GFP_ATOMIC); if (unlikely(err < 0)) { spin_unlock_irqrestore(&ua->lock, flags); abort_usb_playback(ua); abort_alsa_playback(ua); dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); return; } ua->playback.substream->runtime->delay += frames; } spin_unlock_irqrestore(&ua->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(ua->playback.substream); } /* copy data from the URB buffer into the ALSA ring buffer */ static bool copy_capture_data(struct ua101_stream *stream, struct urb *urb, unsigned int frames) { struct snd_pcm_runtime *runtime; unsigned int frame_bytes, frames1; u8 *dest; runtime = stream->substream->runtime; frame_bytes = stream->frame_bytes; dest = runtime->dma_area + stream->buffer_pos * frame_bytes; if (stream->buffer_pos + frames <= runtime->buffer_size) { memcpy(dest, urb->transfer_buffer, frames * frame_bytes); } else { /* wrap around at end of ring buffer */ frames1 = runtime->buffer_size - stream->buffer_pos; memcpy(dest, urb->transfer_buffer, frames1 * frame_bytes); memcpy(runtime->dma_area, urb->transfer_buffer + frames1 * frame_bytes, (frames - frames1) * frame_bytes); } stream->buffer_pos += frames; if (stream->buffer_pos >= runtime->buffer_size) stream->buffer_pos -= runtime->buffer_size; stream->period_pos += frames; if (stream->period_pos >= runtime->period_size) { stream->period_pos -= runtime->period_size; return true; } return false; } static void capture_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; struct ua101_stream *stream = &ua->capture; unsigned long flags; unsigned int frames, write_ptr; bool do_period_elapsed; int err; if (unlikely(urb->status == -ENOENT || /* unlinked */ urb->status == -ENODEV || /* device removed */ urb->status == -ECONNRESET || /* unlinked */ urb->status == -ESHUTDOWN)) /* device disabled */ goto stream_stopped; if (urb->status >= 0 && urb->iso_frame_desc[0].status >= 0) frames = urb->iso_frame_desc[0].actual_length / stream->frame_bytes; else frames = 0; spin_lock_irqsave(&ua->lock, flags); if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) do_period_elapsed = copy_capture_data(stream, urb, frames); else do_period_elapsed = false; if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) { err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err < 0)) { spin_unlock_irqrestore(&ua->lock, flags); dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); goto stream_stopped; } /* append packet size to FIFO */ write_ptr = ua->rate_feedback_start; add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count); ua->rate_feedback[write_ptr] = frames; if (ua->rate_feedback_count < ua->playback.queue_length) { ua->rate_feedback_count++; if (ua->rate_feedback_count == 
ua->playback.queue_length) wake_up(&ua->rate_feedback_wait); } else { /* * Ring buffer overflow; this happens when the playback * stream is not running. Throw away the oldest entry, * so that the playback stream, when it starts, sees * the most recent packet sizes. */ add_with_wraparound(ua, &ua->rate_feedback_start, 1); } if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) && !list_empty(&ua->ready_playback_urbs)) queue_work(system_highpri_wq, &ua->playback_work); } spin_unlock_irqrestore(&ua->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(stream->substream); return; stream_stopped: abort_usb_playback(ua); abort_usb_capture(ua); abort_alsa_playback(ua); abort_alsa_capture(ua); } static void first_capture_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; urb->complete = capture_urb_complete; capture_urb_complete(urb); set_bit(CAPTURE_URB_COMPLETED, &ua->states); wake_up(&ua->alsa_capture_wait); } static int submit_stream_urbs(struct ua101 *ua, struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) { int err = usb_submit_urb(&stream->urbs[i]->urb, GFP_KERNEL); if (err < 0) { dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); return err; } } return 0; } static void kill_stream_urbs(struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) if (stream->urbs[i]) usb_kill_urb(&stream->urbs[i]->urb); } static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index) { struct usb_host_interface *alts; alts = ua->intf[intf_index]->cur_altsetting; if (alts->desc.bAlternateSetting != 1) { int err = usb_set_interface(ua->dev, alts->desc.bInterfaceNumber, 1); if (err < 0) { dev_err(&ua->dev->dev, "cannot initialize interface; error %d: %s\n", err, usb_error_string(err)); return err; } } return 0; } static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index) { struct usb_host_interface *alts; if (!ua->intf[intf_index]) return; alts = ua->intf[intf_index]->cur_altsetting; if (alts->desc.bAlternateSetting != 0) { int err = usb_set_interface(ua->dev, alts->desc.bInterfaceNumber, 0); if (err < 0 && !test_bit(DISCONNECTED, &ua->states)) dev_warn(&ua->dev->dev, "interface reset failed; error %d: %s\n", err, usb_error_string(err)); } } static void stop_usb_capture(struct ua101 *ua) { clear_bit(USB_CAPTURE_RUNNING, &ua->states); kill_stream_urbs(&ua->capture); disable_iso_interface(ua, INTF_CAPTURE); } static int start_usb_capture(struct ua101 *ua) { int err; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->capture); err = enable_iso_interface(ua, INTF_CAPTURE); if (err < 0) return err; clear_bit(CAPTURE_URB_COMPLETED, &ua->states); ua->capture.urbs[0]->urb.complete = first_capture_urb_complete; ua->rate_feedback_start = 0; ua->rate_feedback_count = 0; set_bit(USB_CAPTURE_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->capture); if (err < 0) stop_usb_capture(ua); return err; } static void stop_usb_playback(struct ua101 *ua) { clear_bit(USB_PLAYBACK_RUNNING, &ua->states); kill_stream_urbs(&ua->playback); cancel_work_sync(&ua->playback_work); disable_iso_interface(ua, INTF_PLAYBACK); } static int start_usb_playback(struct ua101 *ua) { unsigned int i, frames; struct urb *urb; int err = 0; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->playback); cancel_work_sync(&ua->playback_work); 
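	/*
	 * Added note (a worked example under the assumption of a high-speed
	 * connection, not new driver logic): rate_feedback[] is filled by
	 * capture_urb_complete() with the frame count of each capture packet.
	 * At 44.1 kHz and 8000 packets per second the packets average
	 * 44100 / 8000 = 5.5 frames, i.e. they alternate between 5- and
	 * 6-frame packets, and playback_work() replays exactly that sequence
	 * of lengths, which keeps the playback rate locked to the capture
	 * rate.  The wait below lets the FIFO fill before the initial
	 * playback URBs are submitted.
	 */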
err = enable_iso_interface(ua, INTF_PLAYBACK); if (err < 0) return err; clear_bit(PLAYBACK_URB_COMPLETED, &ua->states); ua->playback.urbs[0]->urb.complete = first_playback_urb_complete; spin_lock_irq(&ua->lock); INIT_LIST_HEAD(&ua->ready_playback_urbs); spin_unlock_irq(&ua->lock); /* * We submit the initial URBs all at once, so we have to wait for the * packet size FIFO to be full. */ wait_event(ua->rate_feedback_wait, ua->rate_feedback_count >= ua->playback.queue_length || !test_bit(USB_CAPTURE_RUNNING, &ua->states) || test_bit(DISCONNECTED, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) { stop_usb_playback(ua); return -ENODEV; } if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) { stop_usb_playback(ua); return -EIO; } for (i = 0; i < ua->playback.queue_length; ++i) { /* all initial URBs contain silence */ spin_lock_irq(&ua->lock); frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; spin_unlock_irq(&ua->lock); urb = &ua->playback.urbs[i]->urb; urb->iso_frame_desc[0].length = frames * ua->playback.frame_bytes; memset(urb->transfer_buffer, 0, urb->iso_frame_desc[0].length); } set_bit(USB_PLAYBACK_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->playback); if (err < 0) stop_usb_playback(ua); return err; } static void abort_alsa_capture(struct ua101 *ua) { if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) snd_pcm_stop_xrun(ua->capture.substream); } static void abort_alsa_playback(struct ua101 *ua) { if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) snd_pcm_stop_xrun(ua->playback.substream); } static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream, unsigned int channels) { int err; substream->runtime->hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_FIFO_IN_FRAMES; substream->runtime->hw.formats = ua->format_bit; substream->runtime->hw.rates = snd_pcm_rate_to_rate_bit(ua->rate); substream->runtime->hw.rate_min = ua->rate; substream->runtime->hw.rate_max = ua->rate; substream->runtime->hw.channels_min = channels; substream->runtime->hw.channels_max = channels; substream->runtime->hw.buffer_bytes_max = 45000 * 1024; substream->runtime->hw.period_bytes_min = 1; substream->runtime->hw.period_bytes_max = UINT_MAX; substream->runtime->hw.periods_min = 2; substream->runtime->hw.periods_max = UINT_MAX; err = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1500000 / ua->packets_per_second, UINT_MAX); if (err < 0) return err; err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24); return err; } static int capture_pcm_open(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; ua->capture.substream = substream; err = set_stream_hw(ua, substream, ua->capture.channels); if (err < 0) return err; substream->runtime->hw.fifo_size = DIV_ROUND_CLOSEST(ua->rate, ua->packets_per_second); substream->runtime->delay = substream->runtime->hw.fifo_size; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) set_bit(ALSA_CAPTURE_OPEN, &ua->states); mutex_unlock(&ua->mutex); return err; } static int playback_pcm_open(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; ua->playback.substream = substream; err = set_stream_hw(ua, substream, ua->playback.channels); if (err < 0) return err; substream->runtime->hw.fifo_size = DIV_ROUND_CLOSEST(ua->rate * 
ua->playback.queue_length, ua->packets_per_second); mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err < 0) goto error; err = start_usb_playback(ua); if (err < 0) { if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states)) stop_usb_capture(ua); goto error; } set_bit(ALSA_PLAYBACK_OPEN, &ua->states); error: mutex_unlock(&ua->mutex); return err; } static int capture_pcm_close(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; mutex_lock(&ua->mutex); clear_bit(ALSA_CAPTURE_OPEN, &ua->states); if (!test_bit(ALSA_PLAYBACK_OPEN, &ua->states)) stop_usb_capture(ua); mutex_unlock(&ua->mutex); return 0; } static int playback_pcm_close(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; mutex_lock(&ua->mutex); stop_usb_playback(ua); clear_bit(ALSA_PLAYBACK_OPEN, &ua->states); if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states)) stop_usb_capture(ua); mutex_unlock(&ua->mutex); return 0; } static int capture_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); mutex_unlock(&ua->mutex); return err; } static int playback_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) err = start_usb_playback(ua); mutex_unlock(&ua->mutex); return err; } static int capture_pcm_prepare(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; /* * The EHCI driver schedules the first packet of an iso stream at 10 ms * in the future, i.e., no data is actually captured for that long. * Take the wait here so that the stream is known to be actually * running when the start trigger has been called. 
*/ wait_event(ua->alsa_capture_wait, test_bit(CAPTURE_URB_COMPLETED, &ua->states) || !test_bit(USB_CAPTURE_RUNNING, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) return -EIO; ua->capture.period_pos = 0; ua->capture.buffer_pos = 0; return 0; } static int playback_pcm_prepare(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) err = start_usb_playback(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; /* see the comment in capture_pcm_prepare() */ wait_event(ua->alsa_playback_wait, test_bit(PLAYBACK_URB_COMPLETED, &ua->states) || !test_bit(USB_PLAYBACK_RUNNING, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return -EIO; substream->runtime->delay = 0; ua->playback.period_pos = 0; ua->playback.buffer_pos = 0; return 0; } static int capture_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct ua101 *ua = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) return -EIO; set_bit(ALSA_CAPTURE_RUNNING, &ua->states); return 0; case SNDRV_PCM_TRIGGER_STOP: clear_bit(ALSA_CAPTURE_RUNNING, &ua->states); return 0; default: return -EINVAL; } } static int playback_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct ua101 *ua = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return -EIO; set_bit(ALSA_PLAYBACK_RUNNING, &ua->states); return 0; case SNDRV_PCM_TRIGGER_STOP: clear_bit(ALSA_PLAYBACK_RUNNING, &ua->states); return 0; default: return -EINVAL; } } static inline snd_pcm_uframes_t ua101_pcm_pointer(struct ua101 *ua, struct ua101_stream *stream) { unsigned long flags; unsigned int pos; spin_lock_irqsave(&ua->lock, flags); pos = stream->buffer_pos; spin_unlock_irqrestore(&ua->lock, flags); return pos; } static snd_pcm_uframes_t capture_pcm_pointer(struct snd_pcm_substream *subs) { struct ua101 *ua = subs->private_data; return ua101_pcm_pointer(ua, &ua->capture); } static snd_pcm_uframes_t playback_pcm_pointer(struct snd_pcm_substream *subs) { struct ua101 *ua = subs->private_data; return ua101_pcm_pointer(ua, &ua->playback); } static const struct snd_pcm_ops capture_pcm_ops = { .open = capture_pcm_open, .close = capture_pcm_close, .hw_params = capture_pcm_hw_params, .prepare = capture_pcm_prepare, .trigger = capture_pcm_trigger, .pointer = capture_pcm_pointer, }; static const struct snd_pcm_ops playback_pcm_ops = { .open = playback_pcm_open, .close = playback_pcm_close, .hw_params = playback_pcm_hw_params, .prepare = playback_pcm_prepare, .trigger = playback_pcm_trigger, .pointer = playback_pcm_pointer, }; static const struct uac_format_type_i_discrete_descriptor * find_format_descriptor(struct usb_interface *interface) { struct usb_host_interface *alt; u8 *extra; int extralen; if (interface->num_altsetting != 2) { dev_err(&interface->dev, "invalid num_altsetting\n"); return NULL; } alt = &interface->altsetting[0]; if (alt->desc.bNumEndpoints != 0) { dev_err(&interface->dev, "invalid bNumEndpoints\n"); return NULL; } alt = &interface->altsetting[1]; if (alt->desc.bNumEndpoints != 1) { dev_err(&interface->dev, "invalid bNumEndpoints\n"); return NULL; } extra = alt->extra; extralen = alt->extralen; while (extralen >= sizeof(struct usb_descriptor_header)) { struct 
uac_format_type_i_discrete_descriptor *desc; desc = (struct uac_format_type_i_discrete_descriptor *)extra; if (desc->bLength > extralen) { dev_err(&interface->dev, "descriptor overflow\n"); return NULL; } if (desc->bLength == UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1) && desc->bDescriptorType == USB_DT_CS_INTERFACE && desc->bDescriptorSubtype == UAC_FORMAT_TYPE) { if (desc->bFormatType != UAC_FORMAT_TYPE_I_PCM || desc->bSamFreqType != 1) { dev_err(&interface->dev, "invalid format type\n"); return NULL; } return desc; } extralen -= desc->bLength; extra += desc->bLength; } dev_err(&interface->dev, "sample format descriptor not found\n"); return NULL; } static int detect_usb_format(struct ua101 *ua) { const struct uac_format_type_i_discrete_descriptor *fmt_capture; const struct uac_format_type_i_discrete_descriptor *fmt_playback; const struct usb_endpoint_descriptor *epd; unsigned int rate2; fmt_capture = find_format_descriptor(ua->intf[INTF_CAPTURE]); fmt_playback = find_format_descriptor(ua->intf[INTF_PLAYBACK]); if (!fmt_capture || !fmt_playback) return -ENXIO; switch (fmt_capture->bSubframeSize) { case 3: ua->format_bit = SNDRV_PCM_FMTBIT_S24_3LE; break; case 4: ua->format_bit = SNDRV_PCM_FMTBIT_S32_LE; break; default: dev_err(&ua->dev->dev, "sample width is not 24 or 32 bits\n"); return -ENXIO; } if (fmt_capture->bSubframeSize != fmt_playback->bSubframeSize) { dev_err(&ua->dev->dev, "playback/capture sample widths do not match\n"); return -ENXIO; } if (fmt_capture->bBitResolution != 24 || fmt_playback->bBitResolution != 24) { dev_err(&ua->dev->dev, "sample width is not 24 bits\n"); return -ENXIO; } ua->rate = combine_triple(fmt_capture->tSamFreq[0]); rate2 = combine_triple(fmt_playback->tSamFreq[0]); if (ua->rate != rate2) { dev_err(&ua->dev->dev, "playback/capture rates do not match: %u/%u\n", rate2, ua->rate); return -ENXIO; } switch (ua->dev->speed) { case USB_SPEED_FULL: ua->packets_per_second = 1000; break; case USB_SPEED_HIGH: ua->packets_per_second = 8000; break; default: dev_err(&ua->dev->dev, "unknown device speed\n"); return -ENXIO; } ua->capture.channels = fmt_capture->bNrChannels; ua->playback.channels = fmt_playback->bNrChannels; ua->capture.frame_bytes = fmt_capture->bSubframeSize * ua->capture.channels; ua->playback.frame_bytes = fmt_playback->bSubframeSize * ua->playback.channels; epd = &ua->intf[INTF_CAPTURE]->altsetting[1].endpoint[0].desc; if (!usb_endpoint_is_isoc_in(epd) || usb_endpoint_maxp(epd) == 0) { dev_err(&ua->dev->dev, "invalid capture endpoint\n"); return -ENXIO; } ua->capture.usb_pipe = usb_rcvisocpipe(ua->dev, usb_endpoint_num(epd)); ua->capture.max_packet_bytes = usb_endpoint_maxp(epd); epd = &ua->intf[INTF_PLAYBACK]->altsetting[1].endpoint[0].desc; if (!usb_endpoint_is_isoc_out(epd) || usb_endpoint_maxp(epd) == 0) { dev_err(&ua->dev->dev, "invalid playback endpoint\n"); return -ENXIO; } ua->playback.usb_pipe = usb_sndisocpipe(ua->dev, usb_endpoint_num(epd)); ua->playback.max_packet_bytes = usb_endpoint_maxp(epd); return 0; } static int alloc_stream_buffers(struct ua101 *ua, struct ua101_stream *stream) { unsigned int remaining_packets, packets, packets_per_page, i; size_t size; stream->queue_length = queue_length; stream->queue_length = max(stream->queue_length, (unsigned int)MIN_QUEUE_LENGTH); stream->queue_length = min(stream->queue_length, (unsigned int)MAX_QUEUE_LENGTH); /* * The cache pool sizes used by usb_alloc_coherent() (128, 512, 2048) are * quite bad when used with the packet sizes of this device (e.g. 280, * 520, 624). 
Therefore, we allocate and subdivide entire pages, using * a smaller buffer only for the last chunk. */ remaining_packets = stream->queue_length; packets_per_page = PAGE_SIZE / stream->max_packet_bytes; for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) { packets = min(remaining_packets, packets_per_page); size = packets * stream->max_packet_bytes; stream->buffers[i].addr = usb_alloc_coherent(ua->dev, size, GFP_KERNEL, &stream->buffers[i].dma); if (!stream->buffers[i].addr) return -ENOMEM; stream->buffers[i].size = size; remaining_packets -= packets; if (!remaining_packets) break; } if (remaining_packets) { dev_err(&ua->dev->dev, "too many packets\n"); return -ENXIO; } return 0; } static void free_stream_buffers(struct ua101 *ua, struct ua101_stream *stream) { unsigned int i; for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) usb_free_coherent(ua->dev, stream->buffers[i].size, stream->buffers[i].addr, stream->buffers[i].dma); } static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream, void (*urb_complete)(struct urb *)) { unsigned max_packet_size = stream->max_packet_bytes; struct ua101_urb *urb; unsigned int b, u = 0; for (b = 0; b < ARRAY_SIZE(stream->buffers); ++b) { unsigned int size = stream->buffers[b].size; u8 *addr = stream->buffers[b].addr; dma_addr_t dma = stream->buffers[b].dma; while (size >= max_packet_size) { if (u >= stream->queue_length) goto bufsize_error; urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) return -ENOMEM; usb_init_urb(&urb->urb); urb->urb.dev = ua->dev; urb->urb.pipe = stream->usb_pipe; urb->urb.transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->urb.transfer_buffer = addr; urb->urb.transfer_dma = dma; urb->urb.transfer_buffer_length = max_packet_size; urb->urb.number_of_packets = 1; urb->urb.interval = 1; urb->urb.context = ua; urb->urb.complete = urb_complete; urb->urb.iso_frame_desc[0].offset = 0; urb->urb.iso_frame_desc[0].length = max_packet_size; stream->urbs[u++] = urb; size -= max_packet_size; addr += max_packet_size; dma += max_packet_size; } } if (u == stream->queue_length) return 0; bufsize_error: dev_err(&ua->dev->dev, "internal buffer size error\n"); return -ENXIO; } static void free_stream_urbs(struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) { kfree(stream->urbs[i]); stream->urbs[i] = NULL; } } static void free_usb_related_resources(struct ua101 *ua, struct usb_interface *interface) { unsigned int i; struct usb_interface *intf; mutex_lock(&ua->mutex); free_stream_urbs(&ua->capture); free_stream_urbs(&ua->playback); mutex_unlock(&ua->mutex); free_stream_buffers(ua, &ua->capture); free_stream_buffers(ua, &ua->playback); for (i = 0; i < ARRAY_SIZE(ua->intf); ++i) { mutex_lock(&ua->mutex); intf = ua->intf[i]; ua->intf[i] = NULL; mutex_unlock(&ua->mutex); if (intf) { usb_set_intfdata(intf, NULL); if (intf != interface) usb_driver_release_interface(&ua101_driver, intf); } } } static void ua101_card_free(struct snd_card *card) { struct ua101 *ua = card->private_data; mutex_destroy(&ua->mutex); } static int ua101_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { static const struct snd_usb_midi_endpoint_info midi_ep = { .out_cables = 0x0001, .in_cables = 0x0001 }; static const struct snd_usb_audio_quirk midi_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &midi_ep }; static const int intf_numbers[2][3] = { { /* UA-101 */ [INTF_PLAYBACK] = 0, [INTF_CAPTURE] = 1, [INTF_MIDI] = 2, }, { /* UA-1000 */ [INTF_CAPTURE] = 1, [INTF_PLAYBACK] = 2, [INTF_MIDI] = 3, }, }; struct 
snd_card *card; struct ua101 *ua; unsigned int card_index, i; int is_ua1000; const char *name; char usb_path[32]; int err; is_ua1000 = usb_id->idProduct == 0x0044; if (interface->altsetting->desc.bInterfaceNumber != intf_numbers[is_ua1000][0]) return -ENODEV; mutex_lock(&devices_mutex); for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) if (enable[card_index] && !(devices_used & (1 << card_index))) break; if (card_index >= SNDRV_CARDS) { mutex_unlock(&devices_mutex); return -ENOENT; } err = snd_card_new(&interface->dev, index[card_index], id[card_index], THIS_MODULE, sizeof(*ua), &card); if (err < 0) { mutex_unlock(&devices_mutex); return err; } card->private_free = ua101_card_free; ua = card->private_data; ua->dev = interface_to_usbdev(interface); ua->card = card; ua->card_index = card_index; INIT_LIST_HEAD(&ua->midi_list); spin_lock_init(&ua->lock); mutex_init(&ua->mutex); INIT_LIST_HEAD(&ua->ready_playback_urbs); INIT_WORK(&ua->playback_work, playback_work); init_waitqueue_head(&ua->alsa_capture_wait); init_waitqueue_head(&ua->rate_feedback_wait); init_waitqueue_head(&ua->alsa_playback_wait); ua->intf[0] = interface; for (i = 1; i < ARRAY_SIZE(ua->intf); ++i) { ua->intf[i] = usb_ifnum_to_if(ua->dev, intf_numbers[is_ua1000][i]); if (!ua->intf[i]) { dev_err(&ua->dev->dev, "interface %u not found\n", intf_numbers[is_ua1000][i]); err = -ENXIO; goto probe_error; } err = usb_driver_claim_interface(&ua101_driver, ua->intf[i], ua); if (err < 0) { ua->intf[i] = NULL; err = -EBUSY; goto probe_error; } } err = detect_usb_format(ua); if (err < 0) goto probe_error; name = usb_id->idProduct == 0x0044 ? "UA-1000" : "UA-101"; strcpy(card->driver, "UA-101"); strcpy(card->shortname, name); usb_make_path(ua->dev, usb_path, sizeof(usb_path)); snprintf(ua->card->longname, sizeof(ua->card->longname), "EDIROL %s (serial %s), %u Hz at %s, %s speed", name, ua->dev->serial ? ua->dev->serial : "?", ua->rate, usb_path, ua->dev->speed == USB_SPEED_HIGH ? 
"high" : "full"); err = alloc_stream_buffers(ua, &ua->capture); if (err < 0) goto probe_error; err = alloc_stream_buffers(ua, &ua->playback); if (err < 0) goto probe_error; err = alloc_stream_urbs(ua, &ua->capture, capture_urb_complete); if (err < 0) goto probe_error; err = alloc_stream_urbs(ua, &ua->playback, playback_urb_complete); if (err < 0) goto probe_error; err = snd_pcm_new(card, name, 0, 1, 1, &ua->pcm); if (err < 0) goto probe_error; ua->pcm->private_data = ua; strcpy(ua->pcm->name, name); snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops); snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops); snd_pcm_set_managed_buffer_all(ua->pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0); err = snd_usbmidi_create(card, ua->intf[INTF_MIDI], &ua->midi_list, &midi_quirk); if (err < 0) goto probe_error; err = snd_card_register(card); if (err < 0) goto probe_error; usb_set_intfdata(interface, ua); devices_used |= 1 << card_index; mutex_unlock(&devices_mutex); return 0; probe_error: free_usb_related_resources(ua, interface); snd_card_free(card); mutex_unlock(&devices_mutex); return err; } static void ua101_disconnect(struct usb_interface *interface) { struct ua101 *ua = usb_get_intfdata(interface); struct list_head *midi; if (!ua) return; mutex_lock(&devices_mutex); set_bit(DISCONNECTED, &ua->states); wake_up(&ua->rate_feedback_wait); /* make sure that userspace cannot create new requests */ snd_card_disconnect(ua->card); /* make sure that there are no pending USB requests */ list_for_each(midi, &ua->midi_list) snd_usbmidi_disconnect(midi); abort_alsa_playback(ua); abort_alsa_capture(ua); mutex_lock(&ua->mutex); stop_usb_playback(ua); stop_usb_capture(ua); mutex_unlock(&ua->mutex); free_usb_related_resources(ua, interface); devices_used &= ~(1 << ua->card_index); snd_card_free_when_closed(ua->card); mutex_unlock(&devices_mutex); } static const struct usb_device_id ua101_ids[] = { { USB_DEVICE(0x0582, 0x0044) }, /* UA-1000 high speed */ { USB_DEVICE(0x0582, 0x007d) }, /* UA-101 high speed */ { USB_DEVICE(0x0582, 0x008d) }, /* UA-101 full speed */ { } }; MODULE_DEVICE_TABLE(usb, ua101_ids); static struct usb_driver ua101_driver = { .name = "snd-ua101", .id_table = ua101_ids, .probe = ua101_probe, .disconnect = ua101_disconnect, #if 0 .suspend = ua101_suspend, .resume = ua101_resume, #endif }; module_usb_driver(ua101_driver);
#ifndef __NET_SCHED_CODEL_IMPL_H #define __NET_SCHED_CODEL_IMPL_H /* * Codel - The Controlled-Delay Active Queue Management algorithm * * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net> * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE.
* */ /* Controlling Queue Delay (CoDel) algorithm * ========================================= * Source : Kathleen Nichols and Van Jacobson * http://queue.acm.org/detail.cfm?id=2209336 * * Implemented on linux by Dave Taht and Eric Dumazet */ #include <net/inet_ecn.h> static void codel_params_init(struct codel_params *params) { params->interval = MS2TIME(100); params->target = MS2TIME(5); params->ce_threshold = CODEL_DISABLED_THRESHOLD; params->ce_threshold_mask = 0; params->ce_threshold_selector = 0; params->ecn = false; } static void codel_vars_init(struct codel_vars *vars) { memset(vars, 0, sizeof(*vars)); } static void codel_stats_init(struct codel_stats *stats) { stats->maxpacket = 0; } /* * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) * * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 */ static void codel_Newton_step(struct codel_vars *vars) { u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); val >>= 2; /* avoid overflow in following multiply */ val = (val * invsqrt) >> (32 - 2 + 1); vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; } /* * CoDel control_law is t + interval/sqrt(count) * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid * both sqrt() and divide operation. */ static codel_time_t codel_control_law(codel_time_t t, codel_time_t interval, u32 rec_inv_sqrt) { return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); } static bool codel_should_drop(const struct sk_buff *skb, void *ctx, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, u32 *backlog, codel_time_t now) { bool ok_to_drop; u32 skb_len; if (!skb) { vars->first_above_time = 0; return false; } skb_len = skb_len_func(skb); vars->ldelay = now - skb_time_func(skb); if (unlikely(skb_len > stats->maxpacket)) stats->maxpacket = skb_len; if (codel_time_before(vars->ldelay, params->target) || *backlog <= params->mtu) { /* went below - stay below for at least interval */ vars->first_above_time = 0; return false; } ok_to_drop = false; if (vars->first_above_time == 0) { /* just went above from below. If we stay above * for at least interval we'll say it's ok to drop */ vars->first_above_time = now + params->interval; } else if (codel_time_after(now, vars->first_above_time)) { ok_to_drop = true; } return ok_to_drop; } static struct sk_buff *codel_dequeue(void *ctx, u32 *backlog, struct codel_params *params, struct codel_vars *vars, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, codel_skb_drop_t drop_func, codel_skb_dequeue_t dequeue_func) { struct sk_buff *skb = dequeue_func(vars, ctx); codel_time_t now; bool drop; if (!skb) { vars->dropping = false; return skb; } now = codel_get_time(); drop = codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now); if (vars->dropping) { if (!drop) { /* sojourn time below target - leave dropping state */ vars->dropping = false; } else if (codel_time_after_eq(now, vars->drop_next)) { /* It's time for the next drop. Drop the current * packet and dequeue the next. The dequeue might * take us out of dropping state. * If not, schedule the next drop. 
* A large backlog might result in drop rates so high * that the next drop should happen now, * hence the while loop. */ while (vars->dropping && codel_time_after_eq(now, vars->drop_next)) { vars->count++; /* dont care of possible wrap * since there is no more divide */ codel_Newton_step(vars); if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; vars->drop_next = codel_control_law(vars->drop_next, params->interval, vars->rec_inv_sqrt); goto end; } stats->drop_len += skb_len_func(skb); drop_func(skb, ctx); stats->drop_count++; skb = dequeue_func(vars, ctx); if (!codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now)) { /* leave dropping state */ vars->dropping = false; } else { /* and schedule the next drop */ vars->drop_next = codel_control_law(vars->drop_next, params->interval, vars->rec_inv_sqrt); } } } } else if (drop) { u32 delta; if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { stats->drop_len += skb_len_func(skb); drop_func(skb, ctx); stats->drop_count++; skb = dequeue_func(vars, ctx); drop = codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now); } vars->dropping = true; /* if min went above target close to when we last went below it * assume that the drop rate that controlled the queue on the * last cycle is a good starting point to control it now. */ delta = vars->count - vars->lastcount; if (delta > 1 && codel_time_before(now - vars->drop_next, 16 * params->interval)) { vars->count = delta; /* we dont care if rec_inv_sqrt approximation * is not very precise : * Next Newton steps will correct it quadratically. */ codel_Newton_step(vars); } else { vars->count = 1; vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; } vars->lastcount = vars->count; vars->drop_next = codel_control_law(now, params->interval, vars->rec_inv_sqrt); } end: if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) { bool set_ce = true; if (params->ce_threshold_mask) { int dsfield = skb_get_dsfield(skb); set_ce = (dsfield >= 0 && (((u8)dsfield & params->ce_threshold_mask) == params->ce_threshold_selector)); } if (set_ce && INET_ECN_set_ce(skb)) stats->ce_mark++; } return skb; } #endif
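/*
 * A standalone sketch of the Q0.32 Newton iteration used by
 * codel_Newton_step() above: new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2).
 * The kernel keeps only the upper 16 bits of this value (rec_inv_sqrt); the
 * sketch works on the full 32-bit fixed-point number for clarity and, like
 * CoDel, bumps count by one per step so the previous estimate stays a good
 * starting point for the next iteration.
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint32_t newton_step(uint32_t invsqrt, uint32_t count)
{
	uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;	/* invsqrt^2 in Q0.32 */
	uint64_t val = (3LL << 32) - (uint64_t)count * invsqrt2;	/* 3 - count*invsqrt^2 */

	val >>= 2;				/* pre-shift to avoid overflow below */
	return (val * invsqrt) >> (32 - 2 + 1);	/* multiply by invsqrt, divide by 2 */
}

int main(void)
{
	uint32_t invsqrt = ~0U;	/* start near 1.0, as CoDel does when count restarts at 1 */
	uint32_t count;

	for (count = 1; count <= 10; count++) {
		invsqrt = newton_step(invsqrt, count);
		printf("count=%2u  fixed=%.4f  exact=%.4f\n", count,
		       invsqrt / 4294967296.0, 1.0 / sqrt((double)count));
	}
	return 0;
}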
// SPDX-License-Identifier: GPL-2.0+ /* * TI 3410/5052 USB Serial Driver * * Copyright (C) 2004 Texas Instruments * * This driver is based on the Linux io_ti driver, which is * Copyright (C) 2000-2002 Inside Out Networks * Copyright (C) 2001-2002 Greg Kroah-Hartman * * For questions or problems with this driver, contact Texas Instruments * technical support, or Al Borchers <alborchers@steinerpoint.com>, or * Peter Berger <pberger@brimson.com>. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/ioctl.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> /* Configuration ids */ #define TI_BOOT_CONFIG 1 #define TI_ACTIVE_CONFIG 2 /* Vendor and product ids */ #define TI_VENDOR_ID 0x0451 #define IBM_VENDOR_ID 0x04b3 #define STARTECH_VENDOR_ID 0x14b0 #define TI_3410_PRODUCT_ID 0x3410 #define IBM_4543_PRODUCT_ID 0x4543 #define IBM_454B_PRODUCT_ID 0x454b #define IBM_454C_PRODUCT_ID 0x454c #define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */ #define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */ #define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */ #define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */ #define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */ #define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */ /* Multi-Tech vendor and product ids */ #define MTS_VENDOR_ID 0x06E0 #define MTS_GSM_NO_FW_PRODUCT_ID 0xF108 #define MTS_CDMA_NO_FW_PRODUCT_ID 0xF109 #define MTS_CDMA_PRODUCT_ID 0xF110 #define MTS_GSM_PRODUCT_ID 0xF111 #define MTS_EDGE_PRODUCT_ID 0xF112 #define MTS_MT9234MU_PRODUCT_ID 0xF114 #define MTS_MT9234ZBA_PRODUCT_ID 0xF115 #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 /* Abbott Diabetics vendor and product ids */ #define ABBOTT_VENDOR_ID 0x1a61 #define ABBOTT_STEREO_PLUG_ID 0x3410 #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID #define ABBOTT_STRIP_PORT_ID 0x3420 /* Honeywell vendor and product IDs */ #define HONEYWELL_VENDOR_ID 0x10ac #define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */ /* Moxa UPORT 11x0 vendor and product IDs */ #define MXU1_VENDOR_ID 0x110a #define MXU1_1110_PRODUCT_ID 0x1110 #define MXU1_1130_PRODUCT_ID 0x1130 #define MXU1_1150_PRODUCT_ID 0x1150 #define MXU1_1151_PRODUCT_ID 0x1151 #define MXU1_1131_PRODUCT_ID 0x1131 /* Commands */ #define TI_GET_VERSION 0x01 #define TI_GET_PORT_STATUS 0x02 #define TI_GET_PORT_DEV_INFO 0x03 #define TI_GET_CONFIG 0x04 #define TI_SET_CONFIG 0x05 #define TI_OPEN_PORT 0x06 #define TI_CLOSE_PORT 0x07 #define TI_START_PORT 0x08 #define TI_STOP_PORT 0x09 #define TI_TEST_PORT 0x0A #define TI_PURGE_PORT 0x0B #define TI_RESET_EXT_DEVICE 0x0C #define TI_WRITE_DATA 0x80 #define TI_READ_DATA 0x81 #define TI_REQ_TYPE_CLASS 0x82 /* Module identifiers */ #define TI_I2C_PORT 0x01 #define TI_IEEE1284_PORT 0x02 #define TI_UART1_PORT 0x03 #define TI_UART2_PORT 0x04 #define TI_RAM_PORT 0x05 /* Modem status */ #define TI_MSR_DELTA_CTS 0x01 #define TI_MSR_DELTA_DSR 0x02 #define TI_MSR_DELTA_RI 0x04 #define TI_MSR_DELTA_CD 0x08 #define TI_MSR_CTS 0x10 #define TI_MSR_DSR 0x20
#define TI_MSR_RI 0x40 #define TI_MSR_CD 0x80 #define TI_MSR_DELTA_MASK 0x0F #define TI_MSR_MASK 0xF0 /* Line status */ #define TI_LSR_OVERRUN_ERROR 0x01 #define TI_LSR_PARITY_ERROR 0x02 #define TI_LSR_FRAMING_ERROR 0x04 #define TI_LSR_BREAK 0x08 #define TI_LSR_ERROR 0x0F #define TI_LSR_RX_FULL 0x10 #define TI_LSR_TX_EMPTY 0x20 #define TI_LSR_TX_EMPTY_BOTH 0x40 /* Line control */ #define TI_LCR_BREAK 0x40 /* Modem control */ #define TI_MCR_LOOP 0x04 #define TI_MCR_DTR 0x10 #define TI_MCR_RTS 0x20 /* Mask settings */ #define TI_UART_ENABLE_RTS_IN 0x0001 #define TI_UART_DISABLE_RTS 0x0002 #define TI_UART_ENABLE_PARITY_CHECKING 0x0008 #define TI_UART_ENABLE_DSR_OUT 0x0010 #define TI_UART_ENABLE_CTS_OUT 0x0020 #define TI_UART_ENABLE_X_OUT 0x0040 #define TI_UART_ENABLE_XA_OUT 0x0080 #define TI_UART_ENABLE_X_IN 0x0100 #define TI_UART_ENABLE_DTR_IN 0x0800 #define TI_UART_DISABLE_DTR 0x1000 #define TI_UART_ENABLE_MS_INTS 0x2000 #define TI_UART_ENABLE_AUTO_START_DMA 0x4000 /* Parity */ #define TI_UART_NO_PARITY 0x00 #define TI_UART_ODD_PARITY 0x01 #define TI_UART_EVEN_PARITY 0x02 #define TI_UART_MARK_PARITY 0x03 #define TI_UART_SPACE_PARITY 0x04 /* Stop bits */ #define TI_UART_1_STOP_BITS 0x00 #define TI_UART_1_5_STOP_BITS 0x01 #define TI_UART_2_STOP_BITS 0x02 /* Bits per character */ #define TI_UART_5_DATA_BITS 0x00 #define TI_UART_6_DATA_BITS 0x01 #define TI_UART_7_DATA_BITS 0x02 #define TI_UART_8_DATA_BITS 0x03 /* 232/485 modes */ #define TI_UART_232 0x00 #define TI_UART_485_RECEIVER_DISABLED 0x01 #define TI_UART_485_RECEIVER_ENABLED 0x02 /* Pipe transfer mode and timeout */ #define TI_PIPE_MODE_CONTINUOUS 0x01 #define TI_PIPE_MODE_MASK 0x03 #define TI_PIPE_TIMEOUT_MASK 0x7C #define TI_PIPE_TIMEOUT_ENABLE 0x80 /* Config struct */ struct ti_uart_config { __be16 wBaudRate; __be16 wFlags; u8 bDataBits; u8 bParity; u8 bStopBits; char cXon; char cXoff; u8 bUartMode; }; /* Get port status */ struct ti_port_status { u8 bCmdCode; u8 bModuleId; u8 bErrorCode; u8 bMSR; u8 bLSR; }; /* Purge modes */ #define TI_PURGE_OUTPUT 0x00 #define TI_PURGE_INPUT 0x80 /* Read/Write data */ #define TI_RW_DATA_ADDR_SFR 0x10 #define TI_RW_DATA_ADDR_IDATA 0x20 #define TI_RW_DATA_ADDR_XDATA 0x30 #define TI_RW_DATA_ADDR_CODE 0x40 #define TI_RW_DATA_ADDR_GPIO 0x50 #define TI_RW_DATA_ADDR_I2C 0x60 #define TI_RW_DATA_ADDR_FLASH 0x70 #define TI_RW_DATA_ADDR_DSP 0x80 #define TI_RW_DATA_UNSPECIFIED 0x00 #define TI_RW_DATA_BYTE 0x01 #define TI_RW_DATA_WORD 0x02 #define TI_RW_DATA_DOUBLE_WORD 0x04 struct ti_write_data_bytes { u8 bAddrType; u8 bDataType; u8 bDataCounter; __be16 wBaseAddrHi; __be16 wBaseAddrLo; u8 bData[]; } __packed; struct ti_read_data_request { u8 bAddrType; u8 bDataType; u8 bDataCounter; __be16 wBaseAddrHi; __be16 wBaseAddrLo; } __packed; struct ti_read_data_bytes { u8 bCmdCode; u8 bModuleId; u8 bErrorCode; u8 bData[]; }; /* Interrupt struct */ struct ti_interrupt { u8 bICode; u8 bIInfo; }; /* Interrupt codes */ #define TI_CODE_HARDWARE_ERROR 0xFF #define TI_CODE_DATA_ERROR 0x03 #define TI_CODE_MODEM_STATUS 0x04 /* Download firmware max packet size */ #define TI_DOWNLOAD_MAX_PACKET_SIZE 64 /* Firmware image header */ struct ti_firmware_header { __le16 wLength; u8 bCheckSum; } __packed; /* UART addresses */ #define TI_UART1_BASE_ADDR 0xFFA0 /* UART 1 base address */ #define TI_UART2_BASE_ADDR 0xFFB0 /* UART 2 base address */ #define TI_UART_OFFSET_LCR 0x0002 /* UART MCR register offset */ #define TI_UART_OFFSET_MCR 0x0004 /* UART MCR register offset */ #define TI_DRIVER_AUTHOR "Al Borchers 
<alborchers@steinerpoint.com>" #define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver" #define TI_FIRMWARE_BUF_SIZE 16284 #define TI_TRANSFER_TIMEOUT 2 /* read urb states */ #define TI_READ_URB_RUNNING 0 #define TI_READ_URB_STOPPING 1 #define TI_READ_URB_STOPPED 2 #define TI_EXTRA_VID_PID_COUNT 5 struct ti_port { int tp_is_open; u8 tp_msr; u8 tp_shadow_mcr; u8 tp_uart_mode; /* 232 or 485 modes */ unsigned int tp_uart_base_addr; struct ti_device *tp_tdev; struct usb_serial_port *tp_port; spinlock_t tp_lock; int tp_read_urb_state; int tp_write_urb_in_use; }; struct ti_device { struct mutex td_open_close_lock; int td_open_port_count; struct usb_serial *td_serial; int td_is_3410; bool td_rs485_only; }; static int ti_startup(struct usb_serial *serial); static void ti_release(struct usb_serial *serial); static int ti_port_probe(struct usb_serial_port *port); static void ti_port_remove(struct usb_serial_port *port); static int ti_open(struct tty_struct *tty, struct usb_serial_port *port); static void ti_close(struct usb_serial_port *port); static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count); static unsigned int ti_write_room(struct tty_struct *tty); static unsigned int ti_chars_in_buffer(struct tty_struct *tty); static bool ti_tx_empty(struct usb_serial_port *port); static void ti_throttle(struct tty_struct *tty); static void ti_unthrottle(struct tty_struct *tty); static void ti_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int ti_tiocmget(struct tty_struct *tty); static int ti_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int ti_break(struct tty_struct *tty, int break_state); static void ti_interrupt_callback(struct urb *urb); static void ti_bulk_in_callback(struct urb *urb); static void ti_bulk_out_callback(struct urb *urb); static void ti_recv(struct usb_serial_port *port, unsigned char *data, int length); static void ti_send(struct ti_port *tport); static int ti_set_mcr(struct ti_port *tport, unsigned int mcr); static int ti_get_lsr(struct ti_port *tport, u8 *lsr); static void ti_get_serial_info(struct tty_struct *tty, struct serial_struct *ss); static void ti_handle_new_msr(struct ti_port *tport, u8 msr); static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty); static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty); static int ti_command_out_sync(struct usb_device *udev, u8 command, u16 moduleid, u16 value, void *data, int size); static int ti_command_in_sync(struct usb_device *udev, u8 command, u16 moduleid, u16 value, void *data, int size); static int ti_port_cmd_out(struct usb_serial_port *port, u8 command, u16 value, void *data, int size); static int ti_port_cmd_in(struct usb_serial_port *port, u8 command, u16 value, void *data, int size); static int ti_write_byte(struct usb_serial_port *port, struct ti_device *tdev, unsigned long addr, u8 mask, u8 byte); static int ti_download_firmware(struct ti_device *tdev); static const struct usb_device_id ti_id_table_3410[] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, { 
USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) }, { } /* terminator */ }; static const struct usb_device_id ti_id_table_5052[] = { { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, { } }; static const struct usb_device_id ti_id_table_combined[] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) }, { } /* terminator */ }; static struct usb_serial_driver ti_1port_device = { .driver = { .name = "ti_usb_3410_5052_1", }, .description = "TI USB 3410 1 port adapter", .id_table = ti_id_table_3410, .num_ports = 1, .num_bulk_out = 1, .attach = ti_startup, .release = ti_release, .port_probe = ti_port_probe, .port_remove = ti_port_remove, .open = ti_open, .close = ti_close, .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .get_serial = ti_get_serial_info, .set_termios = ti_set_termios, .tiocmget = ti_tiocmget, .tiocmset = ti_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .break_ctl = ti_break, .read_int_callback = ti_interrupt_callback, .read_bulk_callback = ti_bulk_in_callback, .write_bulk_callback = 
ti_bulk_out_callback, }; static struct usb_serial_driver ti_2port_device = { .driver = { .name = "ti_usb_3410_5052_2", }, .description = "TI USB 5052 2 port adapter", .id_table = ti_id_table_5052, .num_ports = 2, .num_bulk_out = 1, .attach = ti_startup, .release = ti_release, .port_probe = ti_port_probe, .port_remove = ti_port_remove, .open = ti_open, .close = ti_close, .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .get_serial = ti_get_serial_info, .set_termios = ti_set_termios, .tiocmget = ti_tiocmget, .tiocmset = ti_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .break_ctl = ti_break, .read_int_callback = ti_interrupt_callback, .read_bulk_callback = ti_bulk_in_callback, .write_bulk_callback = ti_bulk_out_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &ti_1port_device, &ti_2port_device, NULL }; MODULE_AUTHOR(TI_DRIVER_AUTHOR); MODULE_DESCRIPTION(TI_DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("ti_3410.fw"); MODULE_FIRMWARE("ti_5052.fw"); MODULE_FIRMWARE("mts_cdma.fw"); MODULE_FIRMWARE("mts_gsm.fw"); MODULE_FIRMWARE("mts_edge.fw"); MODULE_FIRMWARE("mts_mt9234mu.fw"); MODULE_FIRMWARE("mts_mt9234zba.fw"); MODULE_FIRMWARE("moxa/moxa-1110.fw"); MODULE_FIRMWARE("moxa/moxa-1130.fw"); MODULE_FIRMWARE("moxa/moxa-1131.fw"); MODULE_FIRMWARE("moxa/moxa-1150.fw"); MODULE_FIRMWARE("moxa/moxa-1151.fw"); MODULE_DEVICE_TABLE(usb, ti_id_table_combined); module_usb_serial_driver(serial_drivers, ti_id_table_combined); static int ti_startup(struct usb_serial *serial) { struct ti_device *tdev; struct usb_device *dev = serial->dev; struct usb_host_interface *cur_altsetting; int num_endpoints; u16 vid, pid; int status; dev_dbg(&dev->dev, "%s - product 0x%4X, num configurations %d, configuration value %d\n", __func__, le16_to_cpu(dev->descriptor.idProduct), dev->descriptor.bNumConfigurations, dev->actconfig->desc.bConfigurationValue); tdev = kzalloc(sizeof(struct ti_device), GFP_KERNEL); if (!tdev) return -ENOMEM; mutex_init(&tdev->td_open_close_lock); tdev->td_serial = serial; usb_set_serial_data(serial, tdev); /* determine device type */ if (serial->type == &ti_1port_device) tdev->td_is_3410 = 1; dev_dbg(&dev->dev, "%s - device type is %s\n", __func__, tdev->td_is_3410 ? "3410" : "5052"); vid = le16_to_cpu(dev->descriptor.idVendor); pid = le16_to_cpu(dev->descriptor.idProduct); if (vid == MXU1_VENDOR_ID) { switch (pid) { case MXU1_1130_PRODUCT_ID: case MXU1_1131_PRODUCT_ID: tdev->td_rs485_only = true; break; } } cur_altsetting = serial->interface->cur_altsetting; num_endpoints = cur_altsetting->desc.bNumEndpoints; /* if we have only 1 configuration and 1 endpoint, download firmware */ if (dev->descriptor.bNumConfigurations == 1 && num_endpoints == 1) { status = ti_download_firmware(tdev); if (status != 0) goto free_tdev; /* 3410 must be reset, 5052 resets itself */ if (tdev->td_is_3410) { msleep_interruptible(100); usb_reset_device(dev); } status = -ENODEV; goto free_tdev; } /* the second configuration must be set */ if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) { status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG); status = status ? 
status : -ENODEV; goto free_tdev; } if (serial->num_bulk_in < serial->num_ports || serial->num_bulk_out < serial->num_ports) { dev_err(&serial->interface->dev, "missing endpoints\n"); status = -ENODEV; goto free_tdev; } return 0; free_tdev: kfree(tdev); usb_set_serial_data(serial, NULL); return status; } static void ti_release(struct usb_serial *serial) { struct ti_device *tdev = usb_get_serial_data(serial); kfree(tdev); } static int ti_port_probe(struct usb_serial_port *port) { struct ti_port *tport; tport = kzalloc(sizeof(*tport), GFP_KERNEL); if (!tport) return -ENOMEM; spin_lock_init(&tport->tp_lock); if (port == port->serial->port[0]) tport->tp_uart_base_addr = TI_UART1_BASE_ADDR; else tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; tport->tp_port = port; tport->tp_tdev = usb_get_serial_data(port->serial); if (tport->tp_tdev->td_rs485_only) tport->tp_uart_mode = TI_UART_485_RECEIVER_DISABLED; else tport->tp_uart_mode = TI_UART_232; usb_set_serial_port_data(port, tport); /* * The TUSB5052 LSR does not tell when the transmitter shift register * has emptied so add a one-character drain delay. */ if (!tport->tp_tdev->td_is_3410) port->port.drain_delay = 1; return 0; } static void ti_port_remove(struct usb_serial_port *port) { struct ti_port *tport; tport = usb_get_serial_port_data(port); kfree(tport); } static int ti_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ti_port *tport = usb_get_serial_port_data(port); struct ti_device *tdev; struct usb_device *dev; struct urb *urb; int status; u16 open_settings; open_settings = (TI_PIPE_MODE_CONTINUOUS | TI_PIPE_TIMEOUT_ENABLE | (TI_TRANSFER_TIMEOUT << 2)); dev = port->serial->dev; tdev = tport->tp_tdev; /* only one open on any port on a device at a time */ if (mutex_lock_interruptible(&tdev->td_open_close_lock)) return -ERESTARTSYS; tport->tp_msr = 0; tport->tp_shadow_mcr |= (TI_MCR_RTS | TI_MCR_DTR); /* start interrupt urb the first time a port is opened on this device */ if (tdev->td_open_port_count == 0) { dev_dbg(&port->dev, "%s - start interrupt in urb\n", __func__); urb = tdev->td_serial->port[0]->interrupt_in_urb; if (!urb) { dev_err(&port->dev, "%s - no interrupt urb\n", __func__); status = -EINVAL; goto release_lock; } urb->context = tdev; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - submit interrupt urb failed, %d\n", __func__, status); goto release_lock; } } if (tty) ti_set_termios(tty, port, &tty->termios); status = ti_port_cmd_out(port, TI_OPEN_PORT, open_settings, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send open command, %d\n", __func__, status); goto unlink_int_urb; } status = ti_port_cmd_out(port, TI_START_PORT, 0, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send start command, %d\n", __func__, status); goto unlink_int_urb; } status = ti_port_cmd_out(port, TI_PURGE_PORT, TI_PURGE_INPUT, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot clear input buffers, %d\n", __func__, status); goto unlink_int_urb; } status = ti_port_cmd_out(port, TI_PURGE_PORT, TI_PURGE_OUTPUT, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot clear output buffers, %d\n", __func__, status); goto unlink_int_urb; } /* reset the data toggle on the bulk endpoints to work around bug in * host controllers where things get out of sync some times */ usb_clear_halt(dev, port->write_urb->pipe); usb_clear_halt(dev, port->read_urb->pipe); if (tty) ti_set_termios(tty, port, &tty->termios); status = ti_port_cmd_out(port, TI_OPEN_PORT, open_settings, NULL, 0); if (status) 
{ dev_err(&port->dev, "%s - cannot send open command (2), %d\n", __func__, status); goto unlink_int_urb; } status = ti_port_cmd_out(port, TI_START_PORT, 0, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send start command (2), %d\n", __func__, status); goto unlink_int_urb; } /* start read urb */ urb = port->read_urb; if (!urb) { dev_err(&port->dev, "%s - no read urb\n", __func__); status = -EINVAL; goto unlink_int_urb; } tport->tp_read_urb_state = TI_READ_URB_RUNNING; urb->context = tport; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - submit read urb failed, %d\n", __func__, status); goto unlink_int_urb; } tport->tp_is_open = 1; ++tdev->td_open_port_count; goto release_lock; unlink_int_urb: if (tdev->td_open_port_count == 0) usb_kill_urb(port->serial->port[0]->interrupt_in_urb); release_lock: mutex_unlock(&tdev->td_open_close_lock); return status; } static void ti_close(struct usb_serial_port *port) { struct ti_device *tdev; struct ti_port *tport; int status; unsigned long flags; tdev = usb_get_serial_data(port->serial); tport = usb_get_serial_port_data(port); tport->tp_is_open = 0; usb_kill_urb(port->read_urb); usb_kill_urb(port->write_urb); tport->tp_write_urb_in_use = 0; spin_lock_irqsave(&tport->tp_lock, flags); kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); status = ti_port_cmd_out(port, TI_CLOSE_PORT, 0, NULL, 0); if (status) dev_err(&port->dev, "%s - cannot send close port command, %d\n" , __func__, status); mutex_lock(&tdev->td_open_close_lock); --tdev->td_open_port_count; if (tdev->td_open_port_count == 0) { /* last port is closed, shut down interrupt urb */ usb_kill_urb(port->serial->port[0]->interrupt_in_urb); } mutex_unlock(&tdev->td_open_close_lock); } static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count) { struct ti_port *tport = usb_get_serial_port_data(port); if (count == 0) { return 0; } if (!tport->tp_is_open) return -ENODEV; count = kfifo_in_locked(&port->write_fifo, data, count, &tport->tp_lock); ti_send(tport); return count; } static unsigned int ti_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); unsigned int room; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); room = kfifo_avail(&port->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, room); return room; } static unsigned int ti_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); unsigned int chars; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); chars = kfifo_len(&port->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars); return chars; } static bool ti_tx_empty(struct usb_serial_port *port) { struct ti_port *tport = usb_get_serial_port_data(port); u8 lsr, mask; int ret; /* * TUSB5052 does not have the TEMT bit to tell if the shift register * is empty. 
*/ if (tport->tp_tdev->td_is_3410) mask = TI_LSR_TX_EMPTY_BOTH; else mask = TI_LSR_TX_EMPTY; ret = ti_get_lsr(tport, &lsr); if (!ret && !(lsr & mask)) return false; return true; } static void ti_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); if (I_IXOFF(tty) || C_CRTSCTS(tty)) ti_stop_read(tport, tty); } static void ti_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); int status; if (I_IXOFF(tty) || C_CRTSCTS(tty)) { status = ti_restart_read(tport, tty); if (status) dev_err(&port->dev, "%s - cannot restart read, %d\n", __func__, status); } } static void ti_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct ti_port *tport = usb_get_serial_port_data(port); struct ti_uart_config *config; int baud; int status; unsigned int mcr; u16 wbaudrate; u16 wflags = 0; config = kmalloc(sizeof(*config), GFP_KERNEL); if (!config) return; /* these flags must be set */ wflags |= TI_UART_ENABLE_MS_INTS; wflags |= TI_UART_ENABLE_AUTO_START_DMA; config->bUartMode = tport->tp_uart_mode; switch (C_CSIZE(tty)) { case CS5: config->bDataBits = TI_UART_5_DATA_BITS; break; case CS6: config->bDataBits = TI_UART_6_DATA_BITS; break; case CS7: config->bDataBits = TI_UART_7_DATA_BITS; break; default: case CS8: config->bDataBits = TI_UART_8_DATA_BITS; break; } /* CMSPAR isn't supported by this driver */ tty->termios.c_cflag &= ~CMSPAR; if (C_PARENB(tty)) { if (C_PARODD(tty)) { wflags |= TI_UART_ENABLE_PARITY_CHECKING; config->bParity = TI_UART_ODD_PARITY; } else { wflags |= TI_UART_ENABLE_PARITY_CHECKING; config->bParity = TI_UART_EVEN_PARITY; } } else { wflags &= ~TI_UART_ENABLE_PARITY_CHECKING; config->bParity = TI_UART_NO_PARITY; } if (C_CSTOPB(tty)) config->bStopBits = TI_UART_2_STOP_BITS; else config->bStopBits = TI_UART_1_STOP_BITS; if (C_CRTSCTS(tty)) { /* RTS flow control must be off to drop RTS for baud rate B0 */ if ((C_BAUD(tty)) != B0) wflags |= TI_UART_ENABLE_RTS_IN; wflags |= TI_UART_ENABLE_CTS_OUT; } else { ti_restart_read(tport, tty); } if (I_IXOFF(tty) || I_IXON(tty)) { config->cXon = START_CHAR(tty); config->cXoff = STOP_CHAR(tty); if (I_IXOFF(tty)) wflags |= TI_UART_ENABLE_X_IN; else ti_restart_read(tport, tty); if (I_IXON(tty)) wflags |= TI_UART_ENABLE_X_OUT; } baud = tty_get_baud_rate(tty); if (!baud) baud = 9600; if (tport->tp_tdev->td_is_3410) wbaudrate = (923077 + baud/2) / baud; else wbaudrate = (461538 + baud/2) / baud; /* FIXME: Should calculate resulting baud here and report it back */ if ((C_BAUD(tty)) != B0) tty_encode_baud_rate(tty, baud, baud); dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n", __func__, baud, wbaudrate, wflags, config->bDataBits, config->bParity, config->bStopBits, config->cXon, config->cXoff, config->bUartMode); config->wBaudRate = cpu_to_be16(wbaudrate); config->wFlags = cpu_to_be16(wflags); status = ti_port_cmd_out(port, TI_SET_CONFIG, 0, config, sizeof(*config)); if (status) dev_err(&port->dev, "%s - cannot set config on port %d, %d\n", __func__, port->port_number, status); /* SET_CONFIG asserts RTS and DTR, reset them correctly */ mcr = tport->tp_shadow_mcr; /* if baud rate is B0, clear RTS and DTR */ if (C_BAUD(tty) == B0) mcr &= ~(TI_MCR_DTR | TI_MCR_RTS); status = ti_set_mcr(tport, mcr); if (status) dev_err(&port->dev, "%s - cannot set 
modem control on port %d, %d\n", __func__, port->port_number, status); kfree(config); } static int ti_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); unsigned int result; unsigned int msr; unsigned int mcr; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); msr = tport->tp_msr; mcr = tport->tp_shadow_mcr; spin_unlock_irqrestore(&tport->tp_lock, flags); result = ((mcr & TI_MCR_DTR) ? TIOCM_DTR : 0) | ((mcr & TI_MCR_RTS) ? TIOCM_RTS : 0) | ((mcr & TI_MCR_LOOP) ? TIOCM_LOOP : 0) | ((msr & TI_MSR_CTS) ? TIOCM_CTS : 0) | ((msr & TI_MSR_CD) ? TIOCM_CAR : 0) | ((msr & TI_MSR_RI) ? TIOCM_RI : 0) | ((msr & TI_MSR_DSR) ? TIOCM_DSR : 0); dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result); return result; } static int ti_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); unsigned int mcr; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); mcr = tport->tp_shadow_mcr; if (set & TIOCM_RTS) mcr |= TI_MCR_RTS; if (set & TIOCM_DTR) mcr |= TI_MCR_DTR; if (set & TIOCM_LOOP) mcr |= TI_MCR_LOOP; if (clear & TIOCM_RTS) mcr &= ~TI_MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~TI_MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~TI_MCR_LOOP; spin_unlock_irqrestore(&tport->tp_lock, flags); return ti_set_mcr(tport, mcr); } static int ti_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); int status; dev_dbg(&port->dev, "%s - state = %d\n", __func__, break_state); status = ti_write_byte(port, tport->tp_tdev, tport->tp_uart_base_addr + TI_UART_OFFSET_LCR, TI_LCR_BREAK, break_state == -1 ? 
TI_LCR_BREAK : 0); if (status) { dev_dbg(&port->dev, "%s - error setting break, %d\n", __func__, status); return status; } return 0; } static int ti_get_port_from_code(unsigned char code) { return (code >> 6) & 0x01; } static int ti_get_func_from_code(unsigned char code) { return code & 0x0f; } static void ti_interrupt_callback(struct urb *urb) { struct ti_device *tdev = urb->context; struct usb_serial_port *port; struct usb_serial *serial = tdev->td_serial; struct ti_port *tport; struct device *dev = &urb->dev->dev; unsigned char *data = urb->transfer_buffer; int length = urb->actual_length; int port_number; int function; int status = urb->status; int retval; u8 msr; switch (status) { case 0: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status); return; default: dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status); goto exit; } if (length != 2) { dev_dbg(dev, "%s - bad packet size, %d\n", __func__, length); goto exit; } if (data[0] == TI_CODE_HARDWARE_ERROR) { dev_err(dev, "%s - hardware error, %d\n", __func__, data[1]); goto exit; } port_number = ti_get_port_from_code(data[0]); function = ti_get_func_from_code(data[0]); dev_dbg(dev, "%s - port_number %d, function %d, data 0x%02X\n", __func__, port_number, function, data[1]); if (port_number >= serial->num_ports) { dev_err(dev, "%s - bad port number, %d\n", __func__, port_number); goto exit; } port = serial->port[port_number]; tport = usb_get_serial_port_data(port); if (!tport) goto exit; switch (function) { case TI_CODE_DATA_ERROR: dev_err(dev, "%s - DATA ERROR, port %d, data 0x%02X\n", __func__, port_number, data[1]); break; case TI_CODE_MODEM_STATUS: msr = data[1]; dev_dbg(dev, "%s - port %d, msr 0x%02X\n", __func__, port_number, msr); ti_handle_new_msr(tport, msr); break; default: dev_err(dev, "%s - unknown interrupt code, 0x%02X\n", __func__, data[1]); break; } exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "%s - resubmit interrupt urb failed, %d\n", __func__, retval); } static void ti_bulk_in_callback(struct urb *urb) { struct ti_port *tport = urb->context; struct usb_serial_port *port = tport->tp_port; struct device *dev = &urb->dev->dev; int status = urb->status; unsigned long flags; int retval = 0; switch (status) { case 0: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status); return; default: dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status); } if (status == -EPIPE) goto exit; if (status) { dev_err(dev, "%s - stopping read!\n", __func__); return; } if (urb->actual_length) { usb_serial_debug_data(dev, __func__, urb->actual_length, urb->transfer_buffer); if (!tport->tp_is_open) dev_dbg(dev, "%s - port closed, dropping data\n", __func__); else ti_recv(port, urb->transfer_buffer, urb->actual_length); spin_lock_irqsave(&tport->tp_lock, flags); port->icount.rx += urb->actual_length; spin_unlock_irqrestore(&tport->tp_lock, flags); } exit: /* continue to read unless stopping */ spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) retval = usb_submit_urb(urb, GFP_ATOMIC); else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING) tport->tp_read_urb_state = TI_READ_URB_STOPPED; spin_unlock_irqrestore(&tport->tp_lock, flags); if (retval) dev_err(dev, "%s - resubmit read urb failed, %d\n", __func__, retval); } static void ti_bulk_out_callback(struct urb *urb) { struct ti_port *tport = urb->context; struct 
usb_serial_port *port = tport->tp_port; int status = urb->status; tport->tp_write_urb_in_use = 0; switch (status) { case 0: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dev_dbg(&port->dev, "%s - urb shutting down, %d\n", __func__, status); return; default: dev_err_console(port, "%s - nonzero urb status, %d\n", __func__, status); } /* send any buffered data */ ti_send(tport); } static void ti_recv(struct usb_serial_port *port, unsigned char *data, int length) { int cnt; do { cnt = tty_insert_flip_string(&port->port, data, length); if (cnt < length) { dev_err(&port->dev, "%s - dropping data, %d bytes lost\n", __func__, length - cnt); if (cnt == 0) break; } tty_flip_buffer_push(&port->port); data += cnt; length -= cnt; } while (length > 0); } static void ti_send(struct ti_port *tport) { int count, result; struct usb_serial_port *port = tport->tp_port; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_write_urb_in_use) goto unlock; count = kfifo_out(&port->write_fifo, port->write_urb->transfer_buffer, port->bulk_out_size); if (count == 0) goto unlock; tport->tp_write_urb_in_use = 1; spin_unlock_irqrestore(&tport->tp_lock, flags); usb_serial_debug_data(&port->dev, __func__, count, port->write_urb->transfer_buffer); usb_fill_bulk_urb(port->write_urb, port->serial->dev, usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress), port->write_urb->transfer_buffer, count, ti_bulk_out_callback, tport); result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) { dev_err_console(port, "%s - submit write urb failed, %d\n", __func__, result); tport->tp_write_urb_in_use = 0; /* TODO: reschedule ti_send */ } else { spin_lock_irqsave(&tport->tp_lock, flags); port->icount.tx += count; spin_unlock_irqrestore(&tport->tp_lock, flags); } /* more room in the buffer for new writes, wakeup */ tty_port_tty_wakeup(&port->port); return; unlock: spin_unlock_irqrestore(&tport->tp_lock, flags); return; } static int ti_set_mcr(struct ti_port *tport, unsigned int mcr) { unsigned long flags; int status; status = ti_write_byte(tport->tp_port, tport->tp_tdev, tport->tp_uart_base_addr + TI_UART_OFFSET_MCR, TI_MCR_RTS | TI_MCR_DTR | TI_MCR_LOOP, mcr); spin_lock_irqsave(&tport->tp_lock, flags); if (!status) tport->tp_shadow_mcr = mcr; spin_unlock_irqrestore(&tport->tp_lock, flags); return status; } static int ti_get_lsr(struct ti_port *tport, u8 *lsr) { int size, status; struct usb_serial_port *port = tport->tp_port; struct ti_port_status *data; size = sizeof(struct ti_port_status); data = kmalloc(size, GFP_KERNEL); if (!data) return -ENOMEM; status = ti_port_cmd_in(port, TI_GET_PORT_STATUS, 0, data, size); if (status) { dev_err(&port->dev, "%s - get port status command failed, %d\n", __func__, status); goto free_data; } dev_dbg(&port->dev, "%s - lsr 0x%02X\n", __func__, data->bLSR); *lsr = data->bLSR; free_data: kfree(data); return status; } static void ti_get_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct usb_serial_port *port = tty->driver_data; struct ti_port *tport = usb_get_serial_port_data(port); ss->baud_base = tport->tp_tdev->td_is_3410 ? 
921600 : 460800; } static void ti_handle_new_msr(struct ti_port *tport, u8 msr) { struct async_icount *icount; struct tty_struct *tty; unsigned long flags; dev_dbg(&tport->tp_port->dev, "%s - msr 0x%02X\n", __func__, msr); if (msr & TI_MSR_DELTA_MASK) { spin_lock_irqsave(&tport->tp_lock, flags); icount = &tport->tp_port->icount; if (msr & TI_MSR_DELTA_CTS) icount->cts++; if (msr & TI_MSR_DELTA_DSR) icount->dsr++; if (msr & TI_MSR_DELTA_CD) icount->dcd++; if (msr & TI_MSR_DELTA_RI) icount->rng++; wake_up_interruptible(&tport->tp_port->port.delta_msr_wait); spin_unlock_irqrestore(&tport->tp_lock, flags); } tport->tp_msr = msr & TI_MSR_MASK; /* handle CTS flow control */ tty = tty_port_tty_get(&tport->tp_port->port); if (tty && C_CRTSCTS(tty)) { if (msr & TI_MSR_CTS) tty_wakeup(tty); } tty_kref_put(tty); } static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) tport->tp_read_urb_state = TI_READ_URB_STOPPING; spin_unlock_irqrestore(&tport->tp_lock, flags); } static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty) { struct urb *urb; int status = 0; unsigned long flags; spin_lock_irqsave(&tport->tp_lock, flags); if (tport->tp_read_urb_state == TI_READ_URB_STOPPED) { tport->tp_read_urb_state = TI_READ_URB_RUNNING; urb = tport->tp_port->read_urb; spin_unlock_irqrestore(&tport->tp_lock, flags); urb->context = tport; status = usb_submit_urb(urb, GFP_KERNEL); } else { tport->tp_read_urb_state = TI_READ_URB_RUNNING; spin_unlock_irqrestore(&tport->tp_lock, flags); } return status; } static int ti_command_out_sync(struct usb_device *udev, u8 command, u16 moduleid, u16 value, void *data, int size) { int status; status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), command, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, value, moduleid, data, size, 1000); if (status < 0) return status; return 0; } static int ti_command_in_sync(struct usb_device *udev, u8 command, u16 moduleid, u16 value, void *data, int size) { int status; status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), command, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, moduleid, data, size, 1000); if (status == size) status = 0; else if (status >= 0) status = -ECOMM; return status; } static int ti_port_cmd_out(struct usb_serial_port *port, u8 command, u16 value, void *data, int size) { return ti_command_out_sync(port->serial->dev, command, TI_UART1_PORT + port->port_number, value, data, size); } static int ti_port_cmd_in(struct usb_serial_port *port, u8 command, u16 value, void *data, int size) { return ti_command_in_sync(port->serial->dev, command, TI_UART1_PORT + port->port_number, value, data, size); } static int ti_write_byte(struct usb_serial_port *port, struct ti_device *tdev, unsigned long addr, u8 mask, u8 byte) { int status; unsigned int size; struct ti_write_data_bytes *data; dev_dbg(&port->dev, "%s - addr 0x%08lX, mask 0x%02X, byte 0x%02X\n", __func__, addr, mask, byte); size = sizeof(struct ti_write_data_bytes) + 2; data = kmalloc(size, GFP_KERNEL); if (!data) return -ENOMEM; data->bAddrType = TI_RW_DATA_ADDR_XDATA; data->bDataType = TI_RW_DATA_BYTE; data->bDataCounter = 1; data->wBaseAddrHi = cpu_to_be16(addr>>16); data->wBaseAddrLo = cpu_to_be16(addr); data->bData[0] = mask; data->bData[1] = byte; status = ti_command_out_sync(port->serial->dev, TI_WRITE_DATA, TI_RAM_PORT, 0, data, size); if (status < 0) dev_err(&port->dev, "%s - failed, %d\n", __func__, 
status); kfree(data); return status; } static int ti_do_download(struct usb_device *dev, int pipe, u8 *buffer, int size) { int pos; u8 cs = 0; int done; struct ti_firmware_header *header; int status = 0; int len; for (pos = sizeof(struct ti_firmware_header); pos < size; pos++) cs = (u8)(cs + buffer[pos]); header = (struct ti_firmware_header *)buffer; header->wLength = cpu_to_le16(size - sizeof(*header)); header->bCheckSum = cs; dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__); for (pos = 0; pos < size; pos += done) { len = min(size - pos, TI_DOWNLOAD_MAX_PACKET_SIZE); status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done, 1000); if (status) break; } return status; } static int ti_download_firmware(struct ti_device *tdev) { int status; int buffer_size; u8 *buffer; struct usb_device *dev = tdev->td_serial->dev; unsigned int pipe = usb_sndbulkpipe(dev, tdev->td_serial->port[0]->bulk_out_endpointAddress); const struct firmware *fw_p; char buf[32]; if (le16_to_cpu(dev->descriptor.idVendor) == MXU1_VENDOR_ID) { snprintf(buf, sizeof(buf), "moxa/moxa-%04x.fw", le16_to_cpu(dev->descriptor.idProduct)); status = request_firmware(&fw_p, buf, &dev->dev); goto check_firmware; } /* try ID specific firmware first, then try generic firmware */ sprintf(buf, "ti_usb-v%04x-p%04x.fw", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); status = request_firmware(&fw_p, buf, &dev->dev); if (status != 0) { buf[0] = '\0'; if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { switch (le16_to_cpu(dev->descriptor.idProduct)) { case MTS_CDMA_PRODUCT_ID: strcpy(buf, "mts_cdma.fw"); break; case MTS_GSM_PRODUCT_ID: strcpy(buf, "mts_gsm.fw"); break; case MTS_EDGE_PRODUCT_ID: strcpy(buf, "mts_edge.fw"); break; case MTS_MT9234MU_PRODUCT_ID: strcpy(buf, "mts_mt9234mu.fw"); break; case MTS_MT9234ZBA_PRODUCT_ID: strcpy(buf, "mts_mt9234zba.fw"); break; case MTS_MT9234ZBAOLD_PRODUCT_ID: strcpy(buf, "mts_mt9234zba.fw"); break; } } if (buf[0] == '\0') { if (tdev->td_is_3410) strcpy(buf, "ti_3410.fw"); else strcpy(buf, "ti_5052.fw"); } status = request_firmware(&fw_p, buf, &dev->dev); } check_firmware: if (status) { dev_err(&dev->dev, "%s - firmware not found\n", __func__); return -ENOENT; } if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size); release_firmware(fw_p); return -ENOENT; } buffer_size = TI_FIRMWARE_BUF_SIZE + sizeof(struct ti_firmware_header); buffer = kmalloc(buffer_size, GFP_KERNEL); if (buffer) { memcpy(buffer, fw_p->data, fw_p->size); memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size); status = ti_do_download(dev, pipe, buffer, fw_p->size); kfree(buffer); } else { status = -ENOMEM; } release_firmware(fw_p); if (status) { dev_err(&dev->dev, "%s - error downloading firmware, %d\n", __func__, status); return status; } dev_dbg(&dev->dev, "%s - download successful\n", __func__); return 0; }
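For reference, the 3-byte image header that ti_do_download() fills in before pushing the firmware out the bulk pipe can be reproduced in plain C. This is a minimal sketch of the same framing, assuming only what is visible above: a packed header of __le16 wLength (payload length) followed by u8 bCheckSum (an 8-bit additive sum of every byte after the header). The function and variable names below are illustrative, not part of the driver.

#include <stddef.h>
#include <stdint.h>

#define TI_FW_HEADER_LEN 3	/* packed __le16 wLength + u8 bCheckSum */

/* Fill in the header at the start of a complete firmware image. */
static void ti_fw_fill_header(uint8_t *image, size_t size)
{
	uint16_t payload_len = (uint16_t)(size - TI_FW_HEADER_LEN);
	uint8_t cs = 0;
	size_t pos;

	/* checksum covers the payload only, never the header itself */
	for (pos = TI_FW_HEADER_LEN; pos < size; pos++)
		cs = (uint8_t)(cs + image[pos]);

	image[0] = payload_len & 0xff;		/* wLength, little endian */
	image[1] = (payload_len >> 8) & 0xff;
	image[2] = cs;				/* bCheckSum */
}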
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/core/netprio_cgroup.c Priority Control Group * * Authors: Neil Horman <nhorman@tuxdriver.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/cgroup.h> #include <linux/rcupdate.h> #include <linux/atomic.h> #include <linux/sched/task.h> #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> #include <net/netprio_cgroup.h> #include <linux/fdtable.h> /* * netprio allocates per-net_device priomap array which is indexed by * css->id. Limiting css ID to 16bits doesn't lose anything. */ #define NETPRIO_ID_MAX USHRT_MAX #define PRIOMAP_MIN_SZ 128 /* * Extend @dev->priomap so that it's large enough to accommodate * @target_idx. @dev->priomap.priomap_len > @target_idx after successful * return. Must be called under rtnl lock. */ static int extend_netdev_table(struct net_device *dev, u32 target_idx) { struct netprio_map *old, *new; size_t new_sz, new_len; /* is the existing priomap large enough? */ old = rtnl_dereference(dev->priomap); if (old && old->priomap_len > target_idx) return 0; /* * Determine the new size. Let's keep it power-of-two. We start * from PRIOMAP_MIN_SZ and double it until it's large enough to * accommodate @target_idx. */ new_sz = PRIOMAP_MIN_SZ; while (true) { new_len = (new_sz - offsetof(struct netprio_map, priomap)) / sizeof(new->priomap[0]); if (new_len > target_idx) break; new_sz *= 2; /* overflowed? */ if (WARN_ON(new_sz < PRIOMAP_MIN_SZ)) return -ENOSPC; } /* allocate & copy */ new = kzalloc(new_sz, GFP_KERNEL); if (!new) return -ENOMEM; if (old) memcpy(new->priomap, old->priomap, old->priomap_len * sizeof(old->priomap[0])); new->priomap_len = new_len; /* install the new priomap */ rcu_assign_pointer(dev->priomap, new); if (old) kfree_rcu(old, rcu); return 0; } /** * netprio_prio - return the effective netprio of a cgroup-net_device pair * @css: css part of the target pair * @dev: net_device part of the target pair * * Should be called under RCU read or rtnl lock.
*/ static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev) { struct netprio_map *map = rcu_dereference_rtnl(dev->priomap); int id = css->id; if (map && id < map->priomap_len) return map->priomap[id]; return 0; } /** * netprio_set_prio - set netprio on a cgroup-net_device pair * @css: css part of the target pair * @dev: net_device part of the target pair * @prio: prio to set * * Set netprio to @prio on @css-@dev pair. Should be called under rtnl * lock and may fail under memory pressure for non-zero @prio. */ static int netprio_set_prio(struct cgroup_subsys_state *css, struct net_device *dev, u32 prio) { struct netprio_map *map; int id = css->id; int ret; /* avoid extending priomap for zero writes */ map = rtnl_dereference(dev->priomap); if (!prio && (!map || map->priomap_len <= id)) return 0; ret = extend_netdev_table(dev, id); if (ret) return ret; map = rtnl_dereference(dev->priomap); map->priomap[id] = prio; return 0; } static struct cgroup_subsys_state * cgrp_css_alloc(struct cgroup_subsys_state *parent_css) { struct cgroup_subsys_state *css; css = kzalloc(sizeof(*css), GFP_KERNEL); if (!css) return ERR_PTR(-ENOMEM); return css; } static int cgrp_css_online(struct cgroup_subsys_state *css) { struct cgroup_subsys_state *parent_css = css->parent; struct net_device *dev; int ret = 0; if (css->id > NETPRIO_ID_MAX) return -ENOSPC; if (!parent_css) return 0; rtnl_lock(); /* * Inherit prios from the parent. As all prios are set during * onlining, there is no need to clear them on offline. */ for_each_netdev(&init_net, dev) { u32 prio = netprio_prio(parent_css, dev); ret = netprio_set_prio(css, dev, prio); if (ret) break; } rtnl_unlock(); return ret; } static void cgrp_css_free(struct cgroup_subsys_state *css) { kfree(css); } static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft) { return css->id; } static int read_priomap(struct seq_file *sf, void *v) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) seq_printf(sf, "%s %u\n", dev->name, netprio_prio(seq_css(sf), dev)); rcu_read_unlock(); return 0; } static ssize_t write_priomap(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { char devname[IFNAMSIZ + 1]; struct net_device *dev; u32 prio; int ret; if (sscanf(buf, "%"__stringify(IFNAMSIZ)"s %u", devname, &prio) != 2) return -EINVAL; dev = dev_get_by_name(&init_net, devname); if (!dev) return -ENODEV; rtnl_lock(); ret = netprio_set_prio(of_css(of), dev, prio); rtnl_unlock(); dev_put(dev); return ret ?: nbytes; } static int update_netprio(const void *v, struct file *file, unsigned n) { struct socket *sock = sock_from_file(file); if (sock) sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data, (unsigned long)v); return 0; } static void net_prio_attach(struct cgroup_taskset *tset) { struct task_struct *p; struct cgroup_subsys_state *css; cgroup_taskset_for_each(p, css, tset) { void *v = (void *)(unsigned long)css->id; task_lock(p); iterate_fd(p->files, 0, update_netprio, v); task_unlock(p); } } static struct cftype ss_files[] = { { .name = "prioidx", .read_u64 = read_prioidx, }, { .name = "ifpriomap", .seq_show = read_priomap, .write = write_priomap, }, { } /* terminate */ }; struct cgroup_subsys net_prio_cgrp_subsys = { .css_alloc = cgrp_css_alloc, .css_online = cgrp_css_online, .css_free = cgrp_css_free, .attach = net_prio_attach, .legacy_cftypes = ss_files, }; static int netprio_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = 
netdev_notifier_info_to_dev(ptr); struct netprio_map *old; /* * Note this is called with rtnl_lock held so we have update side * protection on our rcu assignments */ switch (event) { case NETDEV_UNREGISTER: old = rtnl_dereference(dev->priomap); RCU_INIT_POINTER(dev->priomap, NULL); if (old) kfree_rcu(old, rcu); break; } return NOTIFY_DONE; } static struct notifier_block netprio_device_notifier = { .notifier_call = netprio_device_event }; static int __init init_cgroup_netprio(void) { register_netdevice_notifier(&netprio_device_notifier); return 0; } subsys_initcall(init_cgroup_netprio);
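From userspace, the controller above is driven entirely through two cgroup files: read_priomap() prints one "<ifname> <prio>" pair per network device and write_priomap() accepts the same format. A minimal sketch follows; the v1 mount point /sys/fs/cgroup/net_prio and the cgroup name low-prio are assumptions, not taken from the source.

#include <stdio.h>

int main(void)
{
	/* assumed mount point and cgroup name */
	const char *path = "/sys/fs/cgroup/net_prio/low-prio/net_prio.ifpriomap";
	FILE *f = fopen(path, "w");
	int ret = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* traffic from sockets of tasks in this cgroup egresses eth0 with priority 2 */
	if (fprintf(f, "eth0 2\n") < 0)
		ret = 1;
	if (fclose(f) != 0)
		ret = 1;
	return ret;
}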
// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Ethernet-type device handling. * * Version: @(#)eth.c 1.0.7 05/25/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Florian La Roche, <rzsfl@rz.uni-sb.de> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Mr Linux : Arp problems * Alan Cox : Generic queue tidyup (very tiny here) * Alan Cox : eth_header ntohs should be htons * Alan Cox : eth_rebuild_header missing an htons and * minor other things. * Tegge : Arp bug fixes. * Florian : Removed many unnecessary functions, code cleanup * and changes for new arp and skbuff. * Alan Cox : Redid header building to reflect new format. * Alan Cox : ARP only when compiled with CONFIG_INET * Greg Page : 802.2 and SNAP stuff. * Alan Cox : MAC layer pointers/new format.
* Paul Gortmaker : eth_copy_and_sum shouldn't csum padding. * Alan Cox : Protect against forwarding explosions with * older network drivers and IFF_ALLMULTI. * Christer Weinigel : Better rebuild header message. * Andrew Morton : 26Feb01: kill ether_setup() - use netdev_boot_setup(). */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/nvmem-consumer.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/if_ether.h> #include <linux/of_net.h> #include <linux/pci.h> #include <linux/property.h> #include <net/dst.h> #include <net/arp.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/ip.h> #include <net/dsa.h> #include <net/flow_dissector.h> #include <net/gro.h> #include <linux/uaccess.h> #include <net/pkt_sched.h> /** * eth_header - create the Ethernet header * @skb: buffer to alter * @dev: source device * @type: Ethernet type field * @daddr: destination address (NULL leave destination address) * @saddr: source address (NULL use device source address) * @len: packet length (<= skb->len) * * * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length * in here instead. */ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) { struct ethhdr *eth = skb_push(skb, ETH_HLEN); if (type != ETH_P_802_3 && type != ETH_P_802_2) eth->h_proto = htons(type); else eth->h_proto = htons(len); /* * Set the source hardware address. */ if (!saddr) saddr = dev->dev_addr; memcpy(eth->h_source, saddr, ETH_ALEN); if (daddr) { memcpy(eth->h_dest, daddr, ETH_ALEN); return ETH_HLEN; } /* * Anyway, the loopback-device should never use this function... */ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { eth_zero_addr(eth->h_dest); return ETH_HLEN; } return -ETH_HLEN; } EXPORT_SYMBOL(eth_header); /** * eth_get_headlen - determine the length of header for an ethernet frame * @dev: pointer to network device * @data: pointer to start of frame * @len: total length of frame * * Make a best effort attempt to pull the length for all of the headers for * a given frame in a linear buffer. */ u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len) { const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG; const struct ethhdr *eth = (const struct ethhdr *)data; struct flow_keys_basic keys; /* this should never happen, but better safe than sorry */ if (unlikely(len < sizeof(*eth))) return len; /* parse any remaining L2/L3 headers, check for L4 */ if (!skb_flow_dissect_flow_keys_basic(dev_net(dev), NULL, &keys, data, eth->h_proto, sizeof(*eth), len, flags)) return max_t(u32, keys.control.thoff, sizeof(*eth)); /* parse for any L4 headers */ return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len); } EXPORT_SYMBOL(eth_get_headlen); /** * eth_type_trans - determine the packet's protocol ID. * @skb: received socket data * @dev: receiving network device * * The rule here is that we * assume 802.3 if the type field is short enough to be a length. * This is normal practice and works for any 'now in use' protocol. 
*/ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) { unsigned short _service_access_point; const unsigned short *sap; const struct ethhdr *eth; skb->dev = dev; skb_reset_mac_header(skb); eth = eth_skb_pull_mac(skb); eth_skb_pkt_type(skb, dev); /* * Some variants of DSA tagging don't have an ethertype field * at all, so we check here whether one of those tagging * variants has been configured on the receiving interface, * and if so, set skb->protocol without looking at the packet. */ if (unlikely(netdev_uses_dsa(dev))) return htons(ETH_P_XDSA); if (likely(eth_proto_is_802_3(eth->h_proto))) return eth->h_proto; /* * This is a magic hack to spot IPX packets. Older Novell breaks * the protocol design and runs IPX over 802.3 without an 802.2 LLC * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This * won't work for fault tolerant netware but does for the rest. */ sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point); if (sap && *sap == 0xFFFF) return htons(ETH_P_802_3); /* * Real 802.2 LLC */ return htons(ETH_P_802_2); } EXPORT_SYMBOL(eth_type_trans); /** * eth_header_parse - extract hardware address from packet * @skb: packet to extract header from * @haddr: destination buffer */ int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr) { const struct ethhdr *eth = eth_hdr(skb); memcpy(haddr, eth->h_source, ETH_ALEN); return ETH_ALEN; } EXPORT_SYMBOL(eth_header_parse); /** * eth_header_cache - fill cache entry from neighbour * @neigh: source neighbour * @hh: destination cache entry * @type: Ethernet type field * * Create an Ethernet header template from the neighbour. */ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) { struct ethhdr *eth; const struct net_device *dev = neigh->dev; eth = (struct ethhdr *) (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth)))); if (type == htons(ETH_P_802_3)) return -1; eth->h_proto = type; memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); memcpy(eth->h_dest, neigh->ha, ETH_ALEN); /* Pairs with READ_ONCE() in neigh_resolve_output(), * neigh_hh_output() and neigh_update_hhs(). */ smp_store_release(&hh->hh_len, ETH_HLEN); return 0; } EXPORT_SYMBOL(eth_header_cache); /** * eth_header_cache_update - update cache entry * @hh: destination cache entry * @dev: network device * @haddr: new hardware address * * Called by Address Resolution module to notify changes in address. 
*/ void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr) { memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), haddr, ETH_ALEN); } EXPORT_SYMBOL(eth_header_cache_update); /** * eth_header_parse_protocol - extract protocol from L2 header * @skb: packet to extract protocol from */ __be16 eth_header_parse_protocol(const struct sk_buff *skb) { const struct ethhdr *eth = eth_hdr(skb); return eth->h_proto; } EXPORT_SYMBOL(eth_header_parse_protocol); /** * eth_prepare_mac_addr_change - prepare for mac change * @dev: network device * @p: socket address */ int eth_prepare_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; return 0; } EXPORT_SYMBOL(eth_prepare_mac_addr_change); /** * eth_commit_mac_addr_change - commit mac change * @dev: network device * @p: socket address */ void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; eth_hw_addr_set(dev, addr->sa_data); } EXPORT_SYMBOL(eth_commit_mac_addr_change); /** * eth_mac_addr - set new Ethernet hardware address * @dev: network device * @p: socket address * * Change hardware address of device. * * This doesn't change hardware matching, so needs to be overridden * for most real devices. */ int eth_mac_addr(struct net_device *dev, void *p) { int ret; ret = eth_prepare_mac_addr_change(dev, p); if (ret < 0) return ret; eth_commit_mac_addr_change(dev, p); return 0; } EXPORT_SYMBOL(eth_mac_addr); int eth_validate_addr(struct net_device *dev) { if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; return 0; } EXPORT_SYMBOL(eth_validate_addr); const struct header_ops eth_header_ops ____cacheline_aligned = { .create = eth_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, .parse_protocol = eth_header_parse_protocol, }; /** * ether_setup - setup Ethernet network device * @dev: network device * * Fill in the fields of the device structure with Ethernet-generic values. */ void ether_setup(struct net_device *dev) { dev->header_ops = &eth_header_ops; dev->type = ARPHRD_ETHER; dev->hard_header_len = ETH_HLEN; dev->min_header_len = ETH_HLEN; dev->mtu = ETH_DATA_LEN; dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = ETH_DATA_LEN; dev->addr_len = ETH_ALEN; dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; dev->flags = IFF_BROADCAST|IFF_MULTICAST; dev->priv_flags |= IFF_TX_SKB_SHARING; eth_broadcast_addr(dev->broadcast); } EXPORT_SYMBOL(ether_setup); /** * alloc_etherdev_mqs - Allocates and sets up an Ethernet device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this Ethernet device * @txqs: The number of TX queues this device has. * @rxqs: The number of RX queues this device has. * * Fill in the fields of the device structure with Ethernet-generic * values. Basically does everything except registering the device. * * Constructs a new net device, complete with a private data area of * size (sizeof_priv). A 32-byte (not bit) alignment is enforced for * this private data area. 
*/ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, unsigned int rxqs) { return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_ENUM, ether_setup, txqs, rxqs); } EXPORT_SYMBOL(alloc_etherdev_mqs); ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) { return sysfs_emit(buf, "%*phC\n", len, addr); } EXPORT_SYMBOL(sysfs_format_mac); struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct packet_offload *ptype; unsigned int hlen, off_eth; struct sk_buff *pp = NULL; struct ethhdr *eh, *eh2; struct sk_buff *p; __be16 type; int flush = 1; off_eth = skb_gro_offset(skb); hlen = off_eth + sizeof(*eh); eh = skb_gro_header(skb, hlen, off_eth); if (unlikely(!eh)) goto out; flush = 0; list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; eh2 = (struct ethhdr *)(p->data + off_eth); if (compare_ether_header(eh, eh2)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } type = eh->h_proto; ptype = gro_find_receive_by_type(type); if (ptype == NULL) { flush = 1; goto out; } skb_gro_pull(skb, sizeof(*eh)); skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive, ipv6_gro_receive, inet_gro_receive, head, skb); out: skb_gro_flush_final(skb, pp, flush); return pp; } EXPORT_SYMBOL(eth_gro_receive); int eth_gro_complete(struct sk_buff *skb, int nhoff) { struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff); __be16 type = eh->h_proto; struct packet_offload *ptype; int err = -ENOSYS; if (skb->encapsulation) skb_set_inner_mac_header(skb, nhoff); ptype = gro_find_complete_by_type(type); if (ptype != NULL) err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, nhoff + sizeof(*eh)); return err; } EXPORT_SYMBOL(eth_gro_complete); static struct packet_offload eth_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_TEB), .priority = 10, .callbacks = { .gro_receive = eth_gro_receive, .gro_complete = eth_gro_complete, }, }; static int __init eth_offload_init(void) { dev_add_offload(&eth_packet_offload); return 0; } fs_initcall(eth_offload_init); unsigned char * __weak arch_get_platform_mac_address(void) { return NULL; } int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr) { unsigned char *addr; int ret; ret = of_get_mac_address(dev->of_node, mac_addr); if (!ret) return 0; addr = arch_get_platform_mac_address(); if (!addr) return -ENODEV; ether_addr_copy(mac_addr, addr); return 0; } EXPORT_SYMBOL(eth_platform_get_mac_address); /** * platform_get_ethdev_address - Set netdev's MAC address from a given device * @dev: Pointer to the device * @netdev: Pointer to netdev to write the address to * * Wrapper around eth_platform_get_mac_address() which writes the address * directly to netdev->dev_addr. */ int platform_get_ethdev_address(struct device *dev, struct net_device *netdev) { u8 addr[ETH_ALEN] __aligned(2); int ret; ret = eth_platform_get_mac_address(dev, addr); if (!ret) eth_hw_addr_set(netdev, addr); return ret; } EXPORT_SYMBOL(platform_get_ethdev_address); /** * nvmem_get_mac_address - Obtain the MAC address from an nvmem cell named * 'mac-address' associated with given device. * * @dev: Device with which the mac-address cell is associated. * @addrbuf: Buffer to which the MAC address will be copied on success. * * Returns 0 on success or a negative error number on failure. 
*/ int nvmem_get_mac_address(struct device *dev, void *addrbuf) { struct nvmem_cell *cell; const void *mac; size_t len; cell = nvmem_cell_get(dev, "mac-address"); if (IS_ERR(cell)) return PTR_ERR(cell); mac = nvmem_cell_read(cell, &len); nvmem_cell_put(cell); if (IS_ERR(mac)) return PTR_ERR(mac); if (len != ETH_ALEN || !is_valid_ether_addr(mac)) { kfree(mac); return -EINVAL; } ether_addr_copy(addrbuf, mac); kfree(mac); return 0; } static int fwnode_get_mac_addr(struct fwnode_handle *fwnode, const char *name, char *addr) { int ret; ret = fwnode_property_read_u8_array(fwnode, name, addr, ETH_ALEN); if (ret) return ret; if (!is_valid_ether_addr(addr)) return -EINVAL; return 0; } /** * fwnode_get_mac_address - Get the MAC from the firmware node * @fwnode: Pointer to the firmware node * @addr: Address of buffer to store the MAC in * * Search the firmware node for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain to "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, * because that is the default address. If that isn't set, then the obsolete * 'address' is checked, just in case we're using an old device tree. * * Note that the 'address' property is supposed to contain a virtual address of * the register set, but some DTS files have redefined that property to be the * MAC address. * * All-zero MAC addresses are rejected, because those could be properties that * exist in the firmware tables, but were not updated by the firmware. For * example, the DTS could define 'mac-address' and 'local-mac-address', with * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. * In this case, the real MAC is in 'local-mac-address', and 'mac-address' * exists but is all zeros. */ int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr) { if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) || !fwnode_get_mac_addr(fwnode, "local-mac-address", addr) || !fwnode_get_mac_addr(fwnode, "address", addr)) return 0; return -ENOENT; } EXPORT_SYMBOL(fwnode_get_mac_address); /** * device_get_mac_address - Get the MAC for a given device * @dev: Pointer to the device * @addr: Address of buffer to store the MAC in */ int device_get_mac_address(struct device *dev, char *addr) { return fwnode_get_mac_address(dev_fwnode(dev), addr); } EXPORT_SYMBOL(device_get_mac_address); /** * device_get_ethdev_address - Set netdev's MAC address from a given device * @dev: Pointer to the device * @netdev: Pointer to netdev to write the address to * * Wrapper around device_get_mac_address() which writes the address * directly to netdev->dev_addr. */ int device_get_ethdev_address(struct device *dev, struct net_device *netdev) { u8 addr[ETH_ALEN]; int ret; ret = device_get_mac_address(dev, addr); if (!ret) eth_hw_addr_set(netdev, addr); return ret; } EXPORT_SYMBOL(device_get_ethdev_address);
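The protocol decision in eth_type_trans() above reduces to a few rules: an h_proto value of at least ETH_P_802_3_MIN (0x0600) is a real ethertype, anything smaller is an 802.3 length field, a payload that then starts with 0xFFFF is old raw Novell IPX framing (reported as ETH_P_802_3), and everything else is 802.2 LLC (ETH_P_802_2). A simplified restatement in plain C, working in host byte order with an illustrative function name:

#include <stdint.h>

#define ETH_P_802_3_MIN	0x0600	/* smallest real ethertype */
#define ETH_P_802_3	0x0001	/* raw 802.3 frames */
#define ETH_P_802_2	0x0004	/* 802.2 LLC frames */

static uint16_t classify_eth_frame(uint16_t h_proto, const uint8_t *payload)
{
	if (h_proto >= ETH_P_802_3_MIN)
		return h_proto;		/* Ethernet II: field is the ethertype */

	if (payload[0] == 0xff && payload[1] == 0xff)
		return ETH_P_802_3;	/* raw 802.3, old Novell IPX */

	return ETH_P_802_2;		/* 802.2 LLC */
}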
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/sched/user.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include
<linux/pagewalk.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/secretmem.h> #include "internal.h" struct mlock_fbatch { local_lock_t lock; struct folio_batch fbatch; }; static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = { .lock = INIT_LOCAL_LOCK(lock), }; bool can_do_mlock(void) { if (rlimit(RLIMIT_MEMLOCK) != 0) return true; if (capable(CAP_IPC_LOCK)) return true; return false; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked folios are marked with the PG_mlocked flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked folio [folio_test_mlocked(folio)] is unevictable. As such, it * will be ostensibly placed on the LRU "unevictable" list (actually no such * list exists), rather than the [in]active lists. PG_unevictable is set to * indicate the unevictable state. */ static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec) { /* There is nothing more we can do while it's off LRU */ if (!folio_test_clear_lru(folio)) return lruvec; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (unlikely(folio_evictable(folio))) { /* * This is a little surprising, but quite possible: PG_mlocked * must have got cleared already by another CPU. Could this * folio be unevictable? I'm not sure, but move it now if so. */ if (folio_test_unevictable(folio)) { lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGRESCUED, folio_nr_pages(folio)); } goto out; } if (folio_test_unevictable(folio)) { if (folio_test_mlocked(folio)) folio->mlock_count++; goto out; } lruvec_del_folio(lruvec, folio); folio_clear_active(folio); folio_set_unevictable(folio); folio->mlock_count = !!folio_test_mlocked(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio)); out: folio_set_lru(folio); return lruvec; } static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec) { VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); lruvec = folio_lruvec_relock_irq(folio, lruvec); /* As above, this is a little surprising, but possible */ if (unlikely(folio_evictable(folio))) goto out; folio_set_unevictable(folio); folio->mlock_count = !!folio_test_mlocked(folio); __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio)); out: lruvec_add_folio(lruvec, folio); folio_set_lru(folio); return lruvec; } static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec) { int nr_pages = folio_nr_pages(folio); bool isolated = false; if (!folio_test_clear_lru(folio)) goto munlock; isolated = true; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (folio_test_unevictable(folio)) { /* Then mlock_count is maintained, but might undercount */ if (folio->mlock_count) folio->mlock_count--; if (folio->mlock_count) goto out; } /* else assume that was the last mlock: reclaim will fix it if not */ munlock: if (folio_test_clear_mlocked(folio)) { __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); if (isolated || !folio_test_unevictable(folio)) __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); else __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); } /* folio_evictable() has to be checked *after* clearing Mlocked */ if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) { 
lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); } out: if (isolated) folio_set_lru(folio); return lruvec; } /* * Flags held in the low bits of a struct folio pointer on the mlock_fbatch. */ #define LRU_FOLIO 0x1 #define NEW_FOLIO 0x2 static inline struct folio *mlock_lru(struct folio *folio) { return (struct folio *)((unsigned long)folio + LRU_FOLIO); } static inline struct folio *mlock_new(struct folio *folio) { return (struct folio *)((unsigned long)folio + NEW_FOLIO); } /* * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can * make use of such folio pointer flags in future, but for now just keep it for * mlock. We could use three separate folio batches instead, but one feels * better (munlocking a full folio batch does not need to drain mlocking folio * batches first). */ static void mlock_folio_batch(struct folio_batch *fbatch) { struct lruvec *lruvec = NULL; unsigned long mlock; struct folio *folio; int i; for (i = 0; i < folio_batch_count(fbatch); i++) { folio = fbatch->folios[i]; mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO); folio = (struct folio *)((unsigned long)folio - mlock); fbatch->folios[i] = folio; if (mlock & LRU_FOLIO) lruvec = __mlock_folio(folio, lruvec); else if (mlock & NEW_FOLIO) lruvec = __mlock_new_folio(folio, lruvec); else lruvec = __munlock_folio(folio, lruvec); } if (lruvec) unlock_page_lruvec_irq(lruvec); folios_put(fbatch); } void mlock_drain_local(void) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); if (folio_batch_count(fbatch)) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } void mlock_drain_remote(int cpu) { struct folio_batch *fbatch; WARN_ON_ONCE(cpu_online(cpu)); fbatch = &per_cpu(mlock_fbatch.fbatch, cpu); if (folio_batch_count(fbatch)) mlock_folio_batch(fbatch); } bool need_mlock_drain(int cpu) { return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu)); } /** * mlock_folio - mlock a folio already on (or temporarily off) LRU * @folio: folio to be mlocked. */ void mlock_folio(struct folio *folio) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); if (!folio_test_set_mlocked(folio)) { int nr_pages = folio_nr_pages(folio); zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); } folio_get(folio); if (!folio_batch_add(fbatch, mlock_lru(folio)) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } /** * mlock_new_folio - mlock a newly allocated folio not yet on LRU * @folio: folio to be mlocked, either normal or a THP head. */ void mlock_new_folio(struct folio *folio) { struct folio_batch *fbatch; int nr_pages = folio_nr_pages(folio); local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); folio_set_mlocked(folio); zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); folio_get(folio); if (!folio_batch_add(fbatch, mlock_new(folio)) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } /** * munlock_folio - munlock a folio * @folio: folio to be munlocked, either normal or a THP head. 
*/ void munlock_folio(struct folio *folio) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); /* * folio_test_clear_mlocked(folio) must be left to __munlock_folio(), * which will check whether the folio is multiply mlocked. */ folio_get(folio); if (!folio_batch_add(fbatch, folio) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } static inline unsigned int folio_mlock_step(struct folio *folio, pte_t *pte, unsigned long addr, unsigned long end) { const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; unsigned int count = (end - addr) >> PAGE_SHIFT; pte_t ptent = ptep_get(pte); if (!folio_test_large(folio)) return 1; return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL, NULL, NULL); } static inline bool allow_mlock_munlock(struct folio *folio, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned int step) { /* * For unlock, allow munlock large folio which is partially * mapped to VMA. As it's possible that large folio is * mlocked and VMA is split later. * * During memory pressure, such kind of large folio can * be split. And the pages are not in VM_LOCKed VMA * can be reclaimed. */ if (!(vma->vm_flags & VM_LOCKED)) return true; /* folio_within_range() cannot take KSM, but any small folio is OK */ if (!folio_test_large(folio)) return true; /* folio not in range [start, end), skip mlock */ if (!folio_within_range(folio, vma, start, end)) return false; /* folio is not fully mapped, skip mlock */ if (step != folio_nr_pages(folio)) return false; return true; } static int mlock_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte; pte_t ptent; struct folio *folio; unsigned int step = 1; unsigned long start = addr; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { if (!pmd_present(*pmd)) goto out; if (is_huge_zero_pmd(*pmd)) goto out; folio = pmd_folio(*pmd); if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); goto out; } start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!start_pte) { walk->action = ACTION_AGAIN; return 0; } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (!pte_present(ptent)) continue; folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; step = folio_mlock_step(folio, pte, addr, end); if (!allow_mlock_munlock(folio, vma, start, end, step)) goto next_entry; if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); next_entry: pte += step - 1; addr += (step - 1) << PAGE_SHIFT; } pte_unmap(start_pte); out: spin_unlock(ptl); cond_resched(); return 0; } /* * mlock_vma_pages_range() - mlock any pages already in the range, * or munlock all pages in the range. * @vma - vma containing range to be mlock()ed or munlock()ed * @start - start address in @vma of the range * @end - end of range in @vma * @newflags - the new set of flags for @vma. * * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED; * called for munlock() and munlockall(), to clear VM_LOCKED from @vma. 
*/ static void mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, vm_flags_t newflags) { static const struct mm_walk_ops mlock_walk_ops = { .pmd_entry = mlock_pte_range, .walk_lock = PGWALK_WRLOCK_VERIFY, }; /* * There is a slight chance that concurrent page migration, * or page reclaim finding a page of this now-VM_LOCKED vma, * will call mlock_vma_folio() and raise page's mlock_count: * double counting, leaving the page unevictable indefinitely. * Communicate this danger to mlock_vma_folio() with VM_IO, * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas. * mmap_lock is held in write mode here, so this weird * combination should not be visible to other mmap_lock users; * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED. */ if (newflags & VM_LOCKED) newflags |= VM_IO; vma_start_write(vma); vm_flags_reset_once(vma, newflags); lru_add_drain(); walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); lru_add_drain(); if (newflags & VM_IO) { newflags &= ~VM_IO; vm_flags_reset_once(vma, newflags); } } /* * mlock_fixup - handle mlock[all]/munlock[all] requests. * * Filters out "special" vmas -- VM_LOCKED never gets set for these, and * munlock is a no-op. However, for some special vmas, we go ahead and * populate the ptes. * * For vmas that pass the filters, merge/split as appropriate. */ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; int nr_pages; int ret = 0; vm_flags_t oldflags = vma->vm_flags; if (newflags == oldflags || (oldflags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE)) /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } /* * Keep track of amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!(newflags & VM_LOCKED)) nr_pages = -nr_pages; else if (oldflags & VM_LOCKED) nr_pages = 0; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_lock held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) { /* No work to do, and mlocking twice would be wrong */ vma_start_write(vma); vm_flags_reset(vma, newflags); } else { mlock_vma_pages_range(vma, start, end, newflags); } out: *prev = vma; return ret; } static int apply_vma_lock_flags(unsigned long start, size_t len, vm_flags_t flags) { unsigned long nstart, end, tmp; struct vm_area_struct *vma, *prev; VMA_ITERATOR(vmi, current->mm, start); VM_BUG_ON(offset_in_page(start)); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = vma_iter_load(&vmi); if (!vma) return -ENOMEM; prev = vma_prev(&vmi); if (start > vma->vm_start) prev = vma; nstart = start; tmp = vma->vm_start; for_each_vma_range(vmi, vma, end) { int error; vm_flags_t newflags; if (vma->vm_start != tmp) return -ENOMEM; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= flags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags); if (error) return error; tmp = vma_iter_end(&vmi); nstart = tmp; } if (tmp < end) return -ENOMEM; return 0; } /* * Go through vma areas and sum size of mlocked * vma pages, as return value. * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT) * is also counted. * Return value: previously mlocked page counts */ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long start, size_t len) { struct vm_area_struct *vma; unsigned long count = 0; unsigned long end; VMA_ITERATOR(vmi, mm, start); /* Don't overflow past ULONG_MAX */ if (unlikely(ULONG_MAX - len < start)) end = ULONG_MAX; else end = start + len; for_each_vma_range(vmi, vma, end) { if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) count -= (start - vma->vm_start); if (end < vma->vm_end) { count += end - vma->vm_start; break; } count += vma->vm_end - vma->vm_start; } } return count >> PAGE_SHIFT; } /* * convert get_user_pages() return value to posix mlock() error */ static int __mlock_posix_error_return(long retval) { if (retval == -EFAULT) retval = -ENOMEM; else if (retval == -ENOMEM) retval = -EAGAIN; return retval; } static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; start = untagged_addr(start); if (!can_do_mlock()) return -EPERM; len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = len >> PAGE_SHIFT; if (mmap_write_lock_killable(current->mm)) return -EINTR; locked += current->mm->locked_vm; if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { /* * It is possible that the regions requested intersect with * previously mlocked areas, that part area in "mm->locked_vm" * should not be counted to new mlock increment count. So check * and adjust locked count if necessary. */ locked -= count_mm_mlocked_page_nr(current->mm, start, len); } /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = apply_vma_lock_flags(start, len, flags); mmap_write_unlock(current->mm); if (error) return error; error = __mm_populate(start, len, 0); if (error) return __mlock_posix_error_return(error); return 0; } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { return do_mlock(start, len, VM_LOCKED); } SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) { vm_flags_t vm_flags = VM_LOCKED; if (flags & ~MLOCK_ONFAULT) return -EINVAL; if (flags & MLOCK_ONFAULT) vm_flags |= VM_LOCKONFAULT; return do_mlock(start, len, vm_flags); } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; start = untagged_addr(start); len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = apply_vma_lock_flags(start, len, 0); mmap_write_unlock(current->mm); return ret; } /* * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall) * and translate into the appropriate modifications to mm->def_flags and/or the * flags for all current VMAs. * * There are a couple of subtleties with this. If mlockall() is called multiple * times with different flags, the values do not necessarily stack. If mlockall * is called once including the MCL_FUTURE flag and then a second time without * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags. 
*/ static int apply_mlockall_flags(int flags) { VMA_ITERATOR(vmi, current->mm, 0); struct vm_area_struct *vma, *prev = NULL; vm_flags_t to_add = 0; current->mm->def_flags &= ~VM_LOCKED_MASK; if (flags & MCL_FUTURE) { current->mm->def_flags |= VM_LOCKED; if (flags & MCL_ONFAULT) current->mm->def_flags |= VM_LOCKONFAULT; if (!(flags & MCL_CURRENT)) goto out; } if (flags & MCL_CURRENT) { to_add |= VM_LOCKED; if (flags & MCL_ONFAULT) to_add |= VM_LOCKONFAULT; } for_each_vma(vmi, vma) { int error; vm_flags_t newflags; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= to_add; error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, newflags); /* Ignore errors, but prev needs fixing up. */ if (error) prev = vma; cond_resched(); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) || flags == MCL_ONFAULT) return -EINVAL; if (!can_do_mlock()) return -EPERM; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = -ENOMEM; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = apply_mlockall_flags(flags); mmap_write_unlock(current->mm); if (!ret && (flags & MCL_CURRENT)) mm_populate(0, TASK_SIZE); return ret; } SYSCALL_DEFINE0(munlockall) { int ret; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = apply_mlockall_flags(0); mmap_write_unlock(current->mm); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct ucounts *ucounts) { unsigned long lock_limit, locked; long memlock; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit != RLIM_INFINITY) lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) { dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); goto out; } if (!get_ucounts(ucounts)) { dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); allowed = 0; goto out; } allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct ucounts *ucounts) { spin_lock(&shmlock_user_lock); dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); spin_unlock(&shmlock_user_lock); put_ucounts(ucounts); }
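The syscall paths above are reached from userspace through mlock(), mlock2() and munlock(); locked pages count against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK, and MLOCK_ONFAULT (mlock2) defers pinning until a page is first faulted in. A minimal usage sketch, with buffer size and error handling chosen only for illustration:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* pin the range now; fails (ENOMEM/EPERM) once RLIMIT_MEMLOCK is exceeded */
	if (mlock(buf, len) != 0) {
		perror("mlock");
		return 1;
	}

	memset(buf, 0, len);	/* pages stay resident while locked */

	munlock(buf, len);
	munmap(buf, len);
	return 0;
}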
// SPDX-License-Identifier: GPL-2.0 /* * Silicon Laboratories CP210x USB to RS232 serial adaptor driver * * Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk) * Copyright (C) 2010-2021 Johan Hovold (johan@kernel.org) * * Support to set flow control line levels using TIOCMGET and TIOCMSET * thanks to Karl Hiramoto karl@hiramoto.org.
RTSCTS hardware flow * control thanks to Munir Nassar nassarmu@real-time.com * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/gpio/driver.h> #include <linux/bitops.h> #include <linux/mutex.h> #define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver" /* * Function Prototypes */ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *); static void cp210x_close(struct usb_serial_port *); static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, const struct ktermios *); static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, const struct ktermios *); static bool cp210x_tx_empty(struct usb_serial_port *port); static int cp210x_tiocmget(struct tty_struct *); static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int); static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int, unsigned int); static int cp210x_break_ctl(struct tty_struct *, int); static int cp210x_attach(struct usb_serial *); static void cp210x_disconnect(struct usb_serial *); static void cp210x_release(struct usb_serial *); static int cp210x_port_probe(struct usb_serial_port *); static void cp210x_port_remove(struct usb_serial_port *); static void cp210x_dtr_rts(struct usb_serial_port *port, int on); static void cp210x_process_read_urb(struct urb *urb); static void cp210x_enable_event_mode(struct usb_serial_port *port); static void cp210x_disable_event_mode(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */ { 
USB_DEVICE(0x10C4, 0x1101) }, /* Arkham Technology DS101 Bus Monitor */ { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */ { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */ { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */ { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. 
RCB-CV-USB USB to RS485 Adaptor */ { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */ { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ { USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */ { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 
ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */ { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */ { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder 
interface */ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ { USB_DEVICE(0x1B93, 0x1013) }, /* Phoenix Contact UPS Device */ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */ { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */ { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */ { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */ { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */ { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */ { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */ { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */ { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */ { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */ { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */ { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */ { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */ { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */ { USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */ { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(usb, id_table); struct cp210x_serial_private { #ifdef CONFIG_GPIOLIB struct gpio_chip gc; bool gpio_registered; u16 gpio_pushpull; u16 gpio_altfunc; u16 gpio_input; #endif u8 partnum; u32 fw_version; speed_t min_speed; speed_t max_speed; bool use_actual_rate; bool no_flow_control; bool no_event_mode; }; enum cp210x_event_state { ES_DATA, ES_ESCAPE, ES_LSR, ES_LSR_DATA_0, ES_LSR_DATA_1, ES_MSR }; struct cp210x_port_private { u8 bInterfaceNumber; bool event_mode; enum 
cp210x_event_state event_state; u8 lsr; struct mutex mutex; bool crtscts; bool dtr; bool rts; }; static struct usb_serial_driver cp210x_device = { .driver = { .name = "cp210x", }, .id_table = id_table, .num_ports = 1, .bulk_in_size = 256, .bulk_out_size = 256, .open = cp210x_open, .close = cp210x_close, .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, .tx_empty = cp210x_tx_empty, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .get_icount = usb_serial_generic_get_icount, .attach = cp210x_attach, .disconnect = cp210x_disconnect, .release = cp210x_release, .port_probe = cp210x_port_probe, .port_remove = cp210x_port_remove, .dtr_rts = cp210x_dtr_rts, .process_read_urb = cp210x_process_read_urb, }; static struct usb_serial_driver * const serial_drivers[] = { &cp210x_device, NULL }; /* Config request types */ #define REQTYPE_HOST_TO_INTERFACE 0x41 #define REQTYPE_INTERFACE_TO_HOST 0xc1 #define REQTYPE_HOST_TO_DEVICE 0x40 #define REQTYPE_DEVICE_TO_HOST 0xc0 /* Config request codes */ #define CP210X_IFC_ENABLE 0x00 #define CP210X_SET_BAUDDIV 0x01 #define CP210X_GET_BAUDDIV 0x02 #define CP210X_SET_LINE_CTL 0x03 #define CP210X_GET_LINE_CTL 0x04 #define CP210X_SET_BREAK 0x05 #define CP210X_IMM_CHAR 0x06 #define CP210X_SET_MHS 0x07 #define CP210X_GET_MDMSTS 0x08 #define CP210X_SET_XON 0x09 #define CP210X_SET_XOFF 0x0A #define CP210X_SET_EVENTMASK 0x0B #define CP210X_GET_EVENTMASK 0x0C #define CP210X_SET_CHAR 0x0D #define CP210X_GET_CHARS 0x0E #define CP210X_GET_PROPS 0x0F #define CP210X_GET_COMM_STATUS 0x10 #define CP210X_RESET 0x11 #define CP210X_PURGE 0x12 #define CP210X_SET_FLOW 0x13 #define CP210X_GET_FLOW 0x14 #define CP210X_EMBED_EVENTS 0x15 #define CP210X_GET_EVENTSTATE 0x16 #define CP210X_SET_CHARS 0x19 #define CP210X_GET_BAUDRATE 0x1D #define CP210X_SET_BAUDRATE 0x1E #define CP210X_VENDOR_SPECIFIC 0xFF /* CP210X_IFC_ENABLE */ #define UART_ENABLE 0x0001 #define UART_DISABLE 0x0000 /* CP210X_(SET|GET)_BAUDDIV */ #define BAUD_RATE_GEN_FREQ 0x384000 /* CP210X_(SET|GET)_LINE_CTL */ #define BITS_DATA_MASK 0X0f00 #define BITS_DATA_5 0X0500 #define BITS_DATA_6 0X0600 #define BITS_DATA_7 0X0700 #define BITS_DATA_8 0X0800 #define BITS_DATA_9 0X0900 #define BITS_PARITY_MASK 0x00f0 #define BITS_PARITY_NONE 0x0000 #define BITS_PARITY_ODD 0x0010 #define BITS_PARITY_EVEN 0x0020 #define BITS_PARITY_MARK 0x0030 #define BITS_PARITY_SPACE 0x0040 #define BITS_STOP_MASK 0x000f #define BITS_STOP_1 0x0000 #define BITS_STOP_1_5 0x0001 #define BITS_STOP_2 0x0002 /* CP210X_SET_BREAK */ #define BREAK_ON 0x0001 #define BREAK_OFF 0x0000 /* CP210X_(SET_MHS|GET_MDMSTS) */ #define CONTROL_DTR 0x0001 #define CONTROL_RTS 0x0002 #define CONTROL_CTS 0x0010 #define CONTROL_DSR 0x0020 #define CONTROL_RING 0x0040 #define CONTROL_DCD 0x0080 #define CONTROL_WRITE_DTR 0x0100 #define CONTROL_WRITE_RTS 0x0200 /* CP210X_(GET|SET)_CHARS */ struct cp210x_special_chars { u8 bEofChar; u8 bErrorChar; u8 bBreakChar; u8 bEventChar; u8 bXonChar; u8 bXoffChar; }; /* CP210X_VENDOR_SPECIFIC values */ #define CP210X_GET_FW_VER 0x000E #define CP210X_READ_2NCONFIG 0x000E #define CP210X_GET_FW_VER_2N 0x0010 #define CP210X_READ_LATCH 0x00C2 #define CP210X_GET_PARTNUM 0x370B #define CP210X_GET_PORTCONFIG 0x370C #define CP210X_GET_DEVICEMODE 0x3711 #define CP210X_WRITE_LATCH 0x37E1 /* Part number definitions */ #define CP210X_PARTNUM_CP2101 0x01 #define CP210X_PARTNUM_CP2102 0x02 #define CP210X_PARTNUM_CP2103 0x03 #define 
CP210X_PARTNUM_CP2104 0x04 #define CP210X_PARTNUM_CP2105 0x05 #define CP210X_PARTNUM_CP2108 0x08 #define CP210X_PARTNUM_CP2102N_QFN28 0x20 #define CP210X_PARTNUM_CP2102N_QFN24 0x21 #define CP210X_PARTNUM_CP2102N_QFN20 0x22 #define CP210X_PARTNUM_UNKNOWN 0xFF /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ struct cp210x_comm_status { __le32 ulErrors; __le32 ulHoldReasons; __le32 ulAmountInInQueue; __le32 ulAmountInOutQueue; u8 bEofReceived; u8 bWaitForImmediate; u8 bReserved; } __packed; /* * CP210X_PURGE - 16 bits passed in wValue of USB request. * SiLabs app note AN571 gives a strange description of the 4 bits: * bit 0 or bit 2 clears the transmit queue and 1 or 3 receive. * writing 1 to all, however, purges cp2108 well enough to avoid the hang. */ #define PURGE_ALL 0x000f /* CP210X_EMBED_EVENTS */ #define CP210X_ESCCHAR 0xec #define CP210X_LSR_OVERRUN BIT(1) #define CP210X_LSR_PARITY BIT(2) #define CP210X_LSR_FRAME BIT(3) #define CP210X_LSR_BREAK BIT(4) /* CP210X_GET_FLOW/CP210X_SET_FLOW read/write these 0x10 bytes */ struct cp210x_flow_ctl { __le32 ulControlHandshake; __le32 ulFlowReplace; __le32 ulXonLimit; __le32 ulXoffLimit; }; /* cp210x_flow_ctl::ulControlHandshake */ #define CP210X_SERIAL_DTR_MASK GENMASK(1, 0) #define CP210X_SERIAL_DTR_INACTIVE (0 << 0) #define CP210X_SERIAL_DTR_ACTIVE (1 << 0) #define CP210X_SERIAL_DTR_FLOW_CTL (2 << 0) #define CP210X_SERIAL_CTS_HANDSHAKE BIT(3) #define CP210X_SERIAL_DSR_HANDSHAKE BIT(4) #define CP210X_SERIAL_DCD_HANDSHAKE BIT(5) #define CP210X_SERIAL_DSR_SENSITIVITY BIT(6) /* cp210x_flow_ctl::ulFlowReplace */ #define CP210X_SERIAL_AUTO_TRANSMIT BIT(0) #define CP210X_SERIAL_AUTO_RECEIVE BIT(1) #define CP210X_SERIAL_ERROR_CHAR BIT(2) #define CP210X_SERIAL_NULL_STRIPPING BIT(3) #define CP210X_SERIAL_BREAK_CHAR BIT(4) #define CP210X_SERIAL_RTS_MASK GENMASK(7, 6) #define CP210X_SERIAL_RTS_INACTIVE (0 << 6) #define CP210X_SERIAL_RTS_ACTIVE (1 << 6) #define CP210X_SERIAL_RTS_FLOW_CTL (2 << 6) #define CP210X_SERIAL_XOFF_CONTINUE BIT(31) /* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */ struct cp210x_pin_mode { u8 eci; u8 sci; }; #define CP210X_PIN_MODE_MODEM 0 #define CP210X_PIN_MODE_GPIO BIT(0) /* * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes * on a CP2105 chip. Structure needs padding due to unused/unspecified bytes. */ struct cp210x_dual_port_config { __le16 gpio_mode; u8 __pad0[2]; __le16 reset_state; u8 __pad1[4]; __le16 suspend_state; u8 sci_cfg; u8 eci_cfg; u8 device_cfg; } __packed; /* * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xd bytes * on a CP2104 chip. Structure needs padding due to unused/unspecified bytes. 
*/ struct cp210x_single_port_config { __le16 gpio_mode; u8 __pad0[2]; __le16 reset_state; u8 __pad1[4]; __le16 suspend_state; u8 device_cfg; } __packed; /* GPIO modes */ #define CP210X_SCI_GPIO_MODE_OFFSET 9 #define CP210X_SCI_GPIO_MODE_MASK GENMASK(11, 9) #define CP210X_ECI_GPIO_MODE_OFFSET 2 #define CP210X_ECI_GPIO_MODE_MASK GENMASK(3, 2) #define CP210X_GPIO_MODE_OFFSET 8 #define CP210X_GPIO_MODE_MASK GENMASK(11, 8) /* CP2105 port configuration values */ #define CP2105_GPIO0_TXLED_MODE BIT(0) #define CP2105_GPIO1_RXLED_MODE BIT(1) #define CP2105_GPIO1_RS485_MODE BIT(2) /* CP2104 port configuration values */ #define CP2104_GPIO0_TXLED_MODE BIT(0) #define CP2104_GPIO1_RXLED_MODE BIT(1) #define CP2104_GPIO2_RS485_MODE BIT(2) struct cp210x_quad_port_state { __le16 gpio_mode_pb0; __le16 gpio_mode_pb1; __le16 gpio_mode_pb2; __le16 gpio_mode_pb3; __le16 gpio_mode_pb4; __le16 gpio_lowpower_pb0; __le16 gpio_lowpower_pb1; __le16 gpio_lowpower_pb2; __le16 gpio_lowpower_pb3; __le16 gpio_lowpower_pb4; __le16 gpio_latch_pb0; __le16 gpio_latch_pb1; __le16 gpio_latch_pb2; __le16 gpio_latch_pb3; __le16 gpio_latch_pb4; }; /* * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0x49 bytes * on a CP2108 chip. * * See https://www.silabs.com/documents/public/application-notes/an978-cp210x-usb-to-uart-api-specification.pdf */ struct cp210x_quad_port_config { struct cp210x_quad_port_state reset_state; struct cp210x_quad_port_state suspend_state; u8 ipdelay_ifc[4]; u8 enhancedfxn_ifc[4]; u8 enhancedfxn_device; u8 extclkfreq[4]; } __packed; #define CP2108_EF_IFC_GPIO_TXLED 0x01 #define CP2108_EF_IFC_GPIO_RXLED 0x02 #define CP2108_EF_IFC_GPIO_RS485 0x04 #define CP2108_EF_IFC_GPIO_RS485_LOGIC 0x08 #define CP2108_EF_IFC_GPIO_CLOCK 0x10 #define CP2108_EF_IFC_DYNAMIC_SUSPEND 0x40 /* CP2102N configuration array indices */ #define CP210X_2NCONFIG_CONFIG_VERSION_IDX 2 #define CP210X_2NCONFIG_GPIO_MODE_IDX 581 #define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX 587 #define CP210X_2NCONFIG_GPIO_CONTROL_IDX 600 /* CP2102N QFN20 port configuration values */ #define CP2102N_QFN20_GPIO2_TXLED_MODE BIT(2) #define CP2102N_QFN20_GPIO3_RXLED_MODE BIT(3) #define CP2102N_QFN20_GPIO1_RS485_MODE BIT(4) #define CP2102N_QFN20_GPIO0_CLK_MODE BIT(6) /* * CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x02 bytes * for CP2102N, CP2103, CP2104 and CP2105. */ struct cp210x_gpio_write { u8 mask; u8 state; }; /* * CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x04 bytes * for CP2108. */ struct cp210x_gpio_write16 { __le16 mask; __le16 state; }; /* * Helper to get interface number when we only have struct usb_serial. */ static u8 cp210x_interface_num(struct usb_serial *serial) { struct usb_host_interface *cur_altsetting; cur_altsetting = serial->interface->cur_altsetting; return cur_altsetting->desc.bInterfaceNumber; } /* * Reads a variable-sized block of CP210X_ registers, identified by req. * Returns data into buf in native USB byte order. */ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req, void *buf, int bufsize) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); int result; result = usb_control_msg_recv(serial->dev, 0, req, REQTYPE_INTERFACE_TO_HOST, 0, port_priv->bInterfaceNumber, buf, bufsize, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (result) { dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n", req, bufsize, result); return result; } return 0; } /* * Reads any 8-bit CP210X_ register identified by req. 
*/ static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val) { return cp210x_read_reg_block(port, req, val, sizeof(*val)); } /* * Reads a variable-sized vendor block of CP210X_ registers, identified by val. * Returns data into buf in native USB byte order. */ static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val, void *buf, int bufsize) { int result; result = usb_control_msg_recv(serial->dev, 0, CP210X_VENDOR_SPECIFIC, type, val, cp210x_interface_num(serial), buf, bufsize, USB_CTRL_GET_TIMEOUT, GFP_KERNEL); if (result) { dev_err(&serial->interface->dev, "failed to get vendor val 0x%04x size %d: %d\n", val, bufsize, result); return result; } return 0; } /* * Writes any 16-bit CP210X_ register (req) whose value is passed * entirely in the wValue field of the USB request. */ static int cp210x_write_u16_reg(struct usb_serial_port *port, u8 req, u16 val) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); int result; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), req, REQTYPE_HOST_TO_INTERFACE, val, port_priv->bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); if (result < 0) { dev_err(&port->dev, "failed set request 0x%x status: %d\n", req, result); } return result; } /* * Writes a variable-sized block of CP210X_ registers, identified by req. * Data in buf must be in native USB byte order. */ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req, void *buf, int bufsize) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); int result; result = usb_control_msg_send(serial->dev, 0, req, REQTYPE_HOST_TO_INTERFACE, 0, port_priv->bInterfaceNumber, buf, bufsize, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (result) { dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n", req, bufsize, result); return result; } return 0; } /* * Writes any 32-bit CP210X_ register identified by req. */ static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val) { __le32 le32_val; le32_val = cpu_to_le32(val); return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val)); } #ifdef CONFIG_GPIOLIB /* * Writes a variable-sized vendor block of CP210X_ registers, identified by val. * Data in buf must be in native USB byte order. 
*/ static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type, u16 val, void *buf, int bufsize) { int result; result = usb_control_msg_send(serial->dev, 0, CP210X_VENDOR_SPECIFIC, type, val, cp210x_interface_num(serial), buf, bufsize, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (result) { dev_err(&serial->interface->dev, "failed to set vendor val 0x%04x size %d: %d\n", val, bufsize, result); return result; } return 0; } #endif static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) { struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); int result; result = cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_ENABLE); if (result) { dev_err(&port->dev, "%s - Unable to enable UART\n", __func__); return result; } if (tty) cp210x_set_termios(tty, port, NULL); result = usb_serial_generic_open(tty, port); if (result) goto err_disable; return 0; err_disable: cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_DISABLE); port_priv->event_mode = false; return result; } static void cp210x_close(struct usb_serial_port *port) { struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); usb_serial_generic_close(port); /* Clear both queues; cp2108 needs this to avoid an occasional hang */ cp210x_write_u16_reg(port, CP210X_PURGE, PURGE_ALL); cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_DISABLE); /* Disabling the interface disables event-insertion mode. */ port_priv->event_mode = false; } static void cp210x_process_lsr(struct usb_serial_port *port, unsigned char lsr, char *flag) { if (lsr & CP210X_LSR_BREAK) { port->icount.brk++; *flag = TTY_BREAK; } else if (lsr & CP210X_LSR_PARITY) { port->icount.parity++; *flag = TTY_PARITY; } else if (lsr & CP210X_LSR_FRAME) { port->icount.frame++; *flag = TTY_FRAME; } if (lsr & CP210X_LSR_OVERRUN) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } static bool cp210x_process_char(struct usb_serial_port *port, unsigned char *ch, char *flag) { struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); switch (port_priv->event_state) { case ES_DATA: if (*ch == CP210X_ESCCHAR) { port_priv->event_state = ES_ESCAPE; break; } return false; case ES_ESCAPE: switch (*ch) { case 0: dev_dbg(&port->dev, "%s - escape char\n", __func__); *ch = CP210X_ESCCHAR; port_priv->event_state = ES_DATA; return false; case 1: port_priv->event_state = ES_LSR_DATA_0; break; case 2: port_priv->event_state = ES_LSR; break; case 3: port_priv->event_state = ES_MSR; break; default: dev_err(&port->dev, "malformed event 0x%02x\n", *ch); port_priv->event_state = ES_DATA; break; } break; case ES_LSR_DATA_0: port_priv->lsr = *ch; port_priv->event_state = ES_LSR_DATA_1; break; case ES_LSR_DATA_1: dev_dbg(&port->dev, "%s - lsr = 0x%02x, data = 0x%02x\n", __func__, port_priv->lsr, *ch); cp210x_process_lsr(port, port_priv->lsr, flag); port_priv->event_state = ES_DATA; return false; case ES_LSR: dev_dbg(&port->dev, "%s - lsr = 0x%02x\n", __func__, *ch); port_priv->lsr = *ch; cp210x_process_lsr(port, port_priv->lsr, flag); port_priv->event_state = ES_DATA; break; case ES_MSR: dev_dbg(&port->dev, "%s - msr = 0x%02x\n", __func__, *ch); /* unimplemented */ port_priv->event_state = ES_DATA; break; } return true; } static void cp210x_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); unsigned char *ch = urb->transfer_buffer; char flag; int i; if (!urb->actual_length) return; if (port_priv->event_mode) { for (i = 0; 
i < urb->actual_length; i++, ch++) { flag = TTY_NORMAL; if (cp210x_process_char(port, ch, &flag)) continue; tty_insert_flip_char(&port->port, *ch, flag); } } else { tty_insert_flip_string(&port->port, ch, urb->actual_length); } tty_flip_buffer_push(&port->port); } /* * Read how many bytes are waiting in the TX queue. */ static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port, u32 *count) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); struct cp210x_comm_status sts; int result; result = usb_control_msg_recv(serial->dev, 0, CP210X_GET_COMM_STATUS, REQTYPE_INTERFACE_TO_HOST, 0, port_priv->bInterfaceNumber, &sts, sizeof(sts), USB_CTRL_GET_TIMEOUT, GFP_KERNEL); if (result) { dev_err(&port->dev, "failed to get comm status: %d\n", result); return result; } *count = le32_to_cpu(sts.ulAmountInOutQueue); return 0; } static bool cp210x_tx_empty(struct usb_serial_port *port) { int err; u32 count; err = cp210x_get_tx_queue_byte_count(port, &count); if (err) return true; return !count; } struct cp210x_rate { speed_t rate; speed_t high; }; static const struct cp210x_rate cp210x_an205_table1[] = { { 300, 300 }, { 600, 600 }, { 1200, 1200 }, { 1800, 1800 }, { 2400, 2400 }, { 4000, 4000 }, { 4800, 4803 }, { 7200, 7207 }, { 9600, 9612 }, { 14400, 14428 }, { 16000, 16062 }, { 19200, 19250 }, { 28800, 28912 }, { 38400, 38601 }, { 51200, 51558 }, { 56000, 56280 }, { 57600, 58053 }, { 64000, 64111 }, { 76800, 77608 }, { 115200, 117028 }, { 128000, 129347 }, { 153600, 156868 }, { 230400, 237832 }, { 250000, 254234 }, { 256000, 273066 }, { 460800, 491520 }, { 500000, 567138 }, { 576000, 670254 }, { 921600, UINT_MAX } }; /* * Quantises the baud rate as per AN205 Table 1 */ static speed_t cp210x_get_an205_rate(speed_t baud) { int i; for (i = 0; i < ARRAY_SIZE(cp210x_an205_table1); ++i) { if (baud <= cp210x_an205_table1[i].high) break; } return cp210x_an205_table1[i].rate; } static speed_t cp210x_get_actual_rate(speed_t baud) { unsigned int prescale = 1; unsigned int div; if (baud <= 365) prescale = 4; div = DIV_ROUND_CLOSEST(48000000, 2 * prescale * baud); baud = 48000000 / (2 * prescale * div); return baud; } /* * CP2101 supports the following baud rates: * * 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800, * 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600 * * CP2102 and CP2103 support the following additional rates: * * 4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000, * 576000 * * The device will map a requested rate to a supported one, but the result * of requests for rates greater than 1053257 is undefined (see AN205). * * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud, * respectively, with an error less than 1%. The actual rates are determined * by * * div = round(freq / (2 x prescale x request)) * actual = freq / (2 x prescale x div) * * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps * or 1 otherwise. * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1 * otherwise. */ static void cp210x_change_speed(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct cp210x_serial_private *priv = usb_get_serial_data(serial); u32 baud; if (tty->termios.c_ospeed == 0) return; /* * This maps the requested rate to the actual rate, a valid rate on * cp2102 or cp2103, or to an arbitrary rate in [1M, max_speed]. 
*/ baud = clamp(tty->termios.c_ospeed, priv->min_speed, priv->max_speed); if (priv->use_actual_rate) baud = cp210x_get_actual_rate(baud); else if (baud < 1000000) baud = cp210x_get_an205_rate(baud); dev_dbg(&port->dev, "%s - setting baud rate to %u\n", __func__, baud); if (cp210x_write_u32_reg(port, CP210X_SET_BAUDRATE, baud)) { dev_warn(&port->dev, "failed to set baud rate to %u\n", baud); if (old_termios) baud = old_termios->c_ospeed; else baud = 9600; } tty_encode_baud_rate(tty, baud, baud); } static void cp210x_enable_event_mode(struct usb_serial_port *port) { struct cp210x_serial_private *priv = usb_get_serial_data(port->serial); struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); int ret; if (port_priv->event_mode) return; if (priv->no_event_mode) return; port_priv->event_state = ES_DATA; port_priv->event_mode = true; ret = cp210x_write_u16_reg(port, CP210X_EMBED_EVENTS, CP210X_ESCCHAR); if (ret) { dev_err(&port->dev, "failed to enable events: %d\n", ret); port_priv->event_mode = false; } } static void cp210x_disable_event_mode(struct usb_serial_port *port) { struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); int ret; if (!port_priv->event_mode) return; ret = cp210x_write_u16_reg(port, CP210X_EMBED_EVENTS, 0); if (ret) { dev_err(&port->dev, "failed to disable events: %d\n", ret); return; } port_priv->event_mode = false; } static bool cp210x_termios_change(const struct ktermios *a, const struct ktermios *b) { bool iflag_change, cc_change; iflag_change = ((a->c_iflag ^ b->c_iflag) & (INPCK | IXON | IXOFF)); cc_change = a->c_cc[VSTART] != b->c_cc[VSTART] || a->c_cc[VSTOP] != b->c_cc[VSTOP]; return tty_termios_hw_change(a, b) || iflag_change || cc_change; } static void cp210x_set_flow_control(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct cp210x_serial_private *priv = usb_get_serial_data(port->serial); struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); struct cp210x_special_chars chars; struct cp210x_flow_ctl flow_ctl; u32 flow_repl; u32 ctl_hs; bool crtscts; int ret; /* * Some CP2102N interpret ulXonLimit as ulFlowReplace (erratum * CP2102N_E104). Report back that flow control is not supported. 
*/ if (priv->no_flow_control) { tty->termios.c_cflag &= ~CRTSCTS; tty->termios.c_iflag &= ~(IXON | IXOFF); } if (tty->termios.c_ospeed != 0 && old_termios && old_termios->c_ospeed != 0 && C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) && I_IXON(tty) == (old_termios->c_iflag & IXON) && I_IXOFF(tty) == (old_termios->c_iflag & IXOFF) && START_CHAR(tty) == old_termios->c_cc[VSTART] && STOP_CHAR(tty) == old_termios->c_cc[VSTOP]) { return; } if (I_IXON(tty) || I_IXOFF(tty)) { memset(&chars, 0, sizeof(chars)); chars.bXonChar = START_CHAR(tty); chars.bXoffChar = STOP_CHAR(tty); ret = cp210x_write_reg_block(port, CP210X_SET_CHARS, &chars, sizeof(chars)); if (ret) { dev_err(&port->dev, "failed to set special chars: %d\n", ret); } } mutex_lock(&port_priv->mutex); if (tty->termios.c_ospeed == 0) { port_priv->dtr = false; port_priv->rts = false; } else if (old_termios && old_termios->c_ospeed == 0) { port_priv->dtr = true; port_priv->rts = true; } ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl, sizeof(flow_ctl)); if (ret) goto out_unlock; ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); ctl_hs &= ~CP210X_SERIAL_DSR_HANDSHAKE; ctl_hs &= ~CP210X_SERIAL_DCD_HANDSHAKE; ctl_hs &= ~CP210X_SERIAL_DSR_SENSITIVITY; ctl_hs &= ~CP210X_SERIAL_DTR_MASK; if (port_priv->dtr) ctl_hs |= CP210X_SERIAL_DTR_ACTIVE; else ctl_hs |= CP210X_SERIAL_DTR_INACTIVE; flow_repl &= ~CP210X_SERIAL_RTS_MASK; if (C_CRTSCTS(tty)) { ctl_hs |= CP210X_SERIAL_CTS_HANDSHAKE; if (port_priv->rts) flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL; else flow_repl |= CP210X_SERIAL_RTS_INACTIVE; crtscts = true; } else { ctl_hs &= ~CP210X_SERIAL_CTS_HANDSHAKE; if (port_priv->rts) flow_repl |= CP210X_SERIAL_RTS_ACTIVE; else flow_repl |= CP210X_SERIAL_RTS_INACTIVE; crtscts = false; } if (I_IXOFF(tty)) { flow_repl |= CP210X_SERIAL_AUTO_RECEIVE; flow_ctl.ulXonLimit = cpu_to_le32(128); flow_ctl.ulXoffLimit = cpu_to_le32(128); } else { flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE; } if (I_IXON(tty)) flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT; else flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT; dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__, ctl_hs, flow_repl); flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs); flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl, sizeof(flow_ctl)); if (ret) goto out_unlock; port_priv->crtscts = crtscts; out_unlock: mutex_unlock(&port_priv->mutex); } static void cp210x_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct cp210x_serial_private *priv = usb_get_serial_data(port->serial); u16 bits; int ret; if (old_termios && !cp210x_termios_change(&tty->termios, old_termios) && tty->termios.c_ospeed != 0) return; if (!old_termios || tty->termios.c_ospeed != old_termios->c_ospeed) cp210x_change_speed(tty, port, old_termios); /* CP2101 only supports CS8, 1 stop bit and non-stick parity. 
*/ if (priv->partnum == CP210X_PARTNUM_CP2101) { tty->termios.c_cflag &= ~(CSIZE | CSTOPB | CMSPAR); tty->termios.c_cflag |= CS8; } bits = 0; switch (C_CSIZE(tty)) { case CS5: bits |= BITS_DATA_5; break; case CS6: bits |= BITS_DATA_6; break; case CS7: bits |= BITS_DATA_7; break; case CS8: default: bits |= BITS_DATA_8; break; } if (C_PARENB(tty)) { if (C_CMSPAR(tty)) { if (C_PARODD(tty)) bits |= BITS_PARITY_MARK; else bits |= BITS_PARITY_SPACE; } else { if (C_PARODD(tty)) bits |= BITS_PARITY_ODD; else bits |= BITS_PARITY_EVEN; } } if (C_CSTOPB(tty)) bits |= BITS_STOP_2; else bits |= BITS_STOP_1; ret = cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits); if (ret) dev_err(&port->dev, "failed to set line control: %d\n", ret); cp210x_set_flow_control(tty, port, old_termios); /* * Enable event-insertion mode only if input parity checking is * enabled for now. */ if (I_INPCK(tty)) cp210x_enable_event_mode(port); else cp210x_disable_event_mode(port); } static int cp210x_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return cp210x_tiocmset_port(port, set, clear); } static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int set, unsigned int clear) { struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); struct cp210x_flow_ctl flow_ctl; u32 ctl_hs, flow_repl; u16 control = 0; int ret; mutex_lock(&port_priv->mutex); if (set & TIOCM_RTS) { port_priv->rts = true; control |= CONTROL_RTS; control |= CONTROL_WRITE_RTS; } if (set & TIOCM_DTR) { port_priv->dtr = true; control |= CONTROL_DTR; control |= CONTROL_WRITE_DTR; } if (clear & TIOCM_RTS) { port_priv->rts = false; control &= ~CONTROL_RTS; control |= CONTROL_WRITE_RTS; } if (clear & TIOCM_DTR) { port_priv->dtr = false; control &= ~CONTROL_DTR; control |= CONTROL_WRITE_DTR; } /* * Use SET_FLOW to set DTR and enable/disable auto-RTS when hardware * flow control is enabled. */ if (port_priv->crtscts && control & CONTROL_WRITE_RTS) { ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl, sizeof(flow_ctl)); if (ret) goto out_unlock; ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); ctl_hs &= ~CP210X_SERIAL_DTR_MASK; if (port_priv->dtr) ctl_hs |= CP210X_SERIAL_DTR_ACTIVE; else ctl_hs |= CP210X_SERIAL_DTR_INACTIVE; flow_repl &= ~CP210X_SERIAL_RTS_MASK; if (port_priv->rts) flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL; else flow_repl |= CP210X_SERIAL_RTS_INACTIVE; flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs); flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__, ctl_hs, flow_repl); ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl, sizeof(flow_ctl)); } else { dev_dbg(&port->dev, "%s - control = 0x%04x\n", __func__, control); ret = cp210x_write_u16_reg(port, CP210X_SET_MHS, control); } out_unlock: mutex_unlock(&port_priv->mutex); return ret; } static void cp210x_dtr_rts(struct usb_serial_port *port, int on) { if (on) cp210x_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0); else cp210x_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS); } static int cp210x_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; u8 control; int result; result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control); if (result) return result; result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) |((control & CONTROL_RTS) ? TIOCM_RTS : 0) |((control & CONTROL_CTS) ? TIOCM_CTS : 0) |((control & CONTROL_DSR) ? 
TIOCM_DSR : 0) |((control & CONTROL_RING)? TIOCM_RI : 0) |((control & CONTROL_DCD) ? TIOCM_CD : 0); dev_dbg(&port->dev, "%s - control = 0x%02x\n", __func__, control); return result; } static int cp210x_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct cp210x_serial_private *priv = usb_get_serial_data(port->serial); u16 state; if (priv->partnum == CP210X_PARTNUM_CP2105) { if (cp210x_interface_num(port->serial) == 1) return -ENOTTY; } if (break_state == 0) state = BREAK_OFF; else state = BREAK_ON; dev_dbg(&port->dev, "%s - turning break %s\n", __func__, state == BREAK_OFF ? "off" : "on"); return cp210x_write_u16_reg(port, CP210X_SET_BREAK, state); } #ifdef CONFIG_GPIOLIB static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); u8 req_type; u16 mask; int result; int len; result = usb_autopm_get_interface(serial->interface); if (result) return result; switch (priv->partnum) { case CP210X_PARTNUM_CP2105: req_type = REQTYPE_INTERFACE_TO_HOST; len = 1; break; case CP210X_PARTNUM_CP2108: req_type = REQTYPE_INTERFACE_TO_HOST; len = 2; break; default: req_type = REQTYPE_DEVICE_TO_HOST; len = 1; break; } mask = 0; result = cp210x_read_vendor_block(serial, req_type, CP210X_READ_LATCH, &mask, len); usb_autopm_put_interface(serial->interface); if (result < 0) return result; le16_to_cpus(&mask); return !!(mask & BIT(gpio)); } static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); struct cp210x_gpio_write16 buf16; struct cp210x_gpio_write buf; u16 mask, state; u16 wIndex; int result; if (value == 1) state = BIT(gpio); else state = 0; mask = BIT(gpio); result = usb_autopm_get_interface(serial->interface); if (result) goto out; switch (priv->partnum) { case CP210X_PARTNUM_CP2105: buf.mask = (u8)mask; buf.state = (u8)state; result = cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE, CP210X_WRITE_LATCH, &buf, sizeof(buf)); break; case CP210X_PARTNUM_CP2108: buf16.mask = cpu_to_le16(mask); buf16.state = cpu_to_le16(state); result = cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE, CP210X_WRITE_LATCH, &buf16, sizeof(buf16)); break; default: wIndex = state << 8 | mask; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), CP210X_VENDOR_SPECIFIC, REQTYPE_HOST_TO_DEVICE, CP210X_WRITE_LATCH, wIndex, NULL, 0, USB_CTRL_SET_TIMEOUT); break; } usb_autopm_put_interface(serial->interface); out: if (result < 0) { dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n", result); } } static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); return priv->gpio_input & BIT(gpio); } static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); if (priv->partnum == CP210X_PARTNUM_CP2105) { /* hardware does not support an input mode */ return -ENOTSUPP; } /* push-pull pins cannot be changed to be inputs */ if (priv->gpio_pushpull & BIT(gpio)) return -EINVAL; /* make sure to release pin if it is being driven low */ cp210x_gpio_set(gc, gpio, 1); priv->gpio_input |= BIT(gpio); return 0; } 
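/*
 * Editor's illustrative sketch (not part of the driver): a hypothetical helper
 * showing how cp210x_gpio_set() above builds the CP210X_WRITE_LATCH wIndex for
 * the single-port parts: the low byte selects which GPIO latch bits to update
 * (mask) and the high byte carries the new state for those bits.
 */
static inline u16 cp210x_latch_windex(u8 mask, u8 state)
{
	return (u16)state << 8 | mask;
}
/* Example: driving GPIO2 high while leaving the other latch bits untouched
 * gives cp210x_latch_windex(BIT(2), BIT(2)) == 0x0404. */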
static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, int value) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); priv->gpio_input &= ~BIT(gpio); cp210x_gpio_set(gc, gpio, value); return 0; } static int cp210x_gpio_set_config(struct gpio_chip *gc, unsigned int gpio, unsigned long config) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); enum pin_config_param param = pinconf_to_config_param(config); /* Succeed only if in correct mode (this can't be set at runtime) */ if ((param == PIN_CONFIG_DRIVE_PUSH_PULL) && (priv->gpio_pushpull & BIT(gpio))) return 0; if ((param == PIN_CONFIG_DRIVE_OPEN_DRAIN) && !(priv->gpio_pushpull & BIT(gpio))) return 0; return -ENOTSUPP; } static int cp210x_gpio_init_valid_mask(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios) { struct usb_serial *serial = gpiochip_get_data(gc); struct cp210x_serial_private *priv = usb_get_serial_data(serial); struct device *dev = &serial->interface->dev; unsigned long altfunc_mask = priv->gpio_altfunc; bitmap_complement(valid_mask, &altfunc_mask, ngpios); if (bitmap_empty(valid_mask, ngpios)) dev_dbg(dev, "no pin configured for GPIO\n"); else dev_dbg(dev, "GPIO.%*pbl configured for GPIO\n", ngpios, valid_mask); return 0; } /* * This function is for configuring GPIO using shared pins, where other signals * are made unavailable by configuring the use of GPIO. This is believed to be * only applicable to the cp2105 at this point, the other devices supported by * this driver that provide GPIO do so in a way that does not impact other * signals and are thus expected to have very different initialisation. */ static int cp2105_gpioconf_init(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); struct cp210x_pin_mode mode; struct cp210x_dual_port_config config; u8 intf_num = cp210x_interface_num(serial); u8 iface_config; int result; result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, CP210X_GET_DEVICEMODE, &mode, sizeof(mode)); if (result < 0) return result; result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, CP210X_GET_PORTCONFIG, &config, sizeof(config)); if (result < 0) return result; /* 2 banks of GPIO - One for the pins taken from each serial port */ if (intf_num == 0) { priv->gc.ngpio = 2; if (mode.eci == CP210X_PIN_MODE_MODEM) { /* mark all GPIOs of this interface as reserved */ priv->gpio_altfunc = 0xff; return 0; } iface_config = config.eci_cfg; priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) & CP210X_ECI_GPIO_MODE_MASK) >> CP210X_ECI_GPIO_MODE_OFFSET); } else if (intf_num == 1) { priv->gc.ngpio = 3; if (mode.sci == CP210X_PIN_MODE_MODEM) { /* mark all GPIOs of this interface as reserved */ priv->gpio_altfunc = 0xff; return 0; } iface_config = config.sci_cfg; priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) & CP210X_SCI_GPIO_MODE_MASK) >> CP210X_SCI_GPIO_MODE_OFFSET); } else { return -ENODEV; } /* mark all pins which are not in GPIO mode */ if (iface_config & CP2105_GPIO0_TXLED_MODE) /* GPIO 0 */ priv->gpio_altfunc |= BIT(0); if (iface_config & (CP2105_GPIO1_RXLED_MODE | /* GPIO 1 */ CP2105_GPIO1_RS485_MODE)) priv->gpio_altfunc |= BIT(1); /* driver implementation for CP2105 only supports outputs */ priv->gpio_input = 0; return 0; } static int cp2104_gpioconf_init(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); 
struct cp210x_single_port_config config; u8 iface_config; u8 gpio_latch; int result; u8 i; result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, CP210X_GET_PORTCONFIG, &config, sizeof(config)); if (result < 0) return result; priv->gc.ngpio = 4; iface_config = config.device_cfg; priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) & CP210X_GPIO_MODE_MASK) >> CP210X_GPIO_MODE_OFFSET); gpio_latch = (u8)((le16_to_cpu(config.reset_state) & CP210X_GPIO_MODE_MASK) >> CP210X_GPIO_MODE_OFFSET); /* mark all pins which are not in GPIO mode */ if (iface_config & CP2104_GPIO0_TXLED_MODE) /* GPIO 0 */ priv->gpio_altfunc |= BIT(0); if (iface_config & CP2104_GPIO1_RXLED_MODE) /* GPIO 1 */ priv->gpio_altfunc |= BIT(1); if (iface_config & CP2104_GPIO2_RS485_MODE) /* GPIO 2 */ priv->gpio_altfunc |= BIT(2); /* * Like the CP2102N, the CP2104 also has no strict input and output pin * modes. * Do the same input mode emulation as for the CP2102N. */ for (i = 0; i < priv->gc.ngpio; ++i) { /* * Set direction to "input" iff pin is open-drain and reset * value is 1. */ if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i))) priv->gpio_input |= BIT(i); } return 0; } static int cp2108_gpio_init(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); struct cp210x_quad_port_config config; u16 gpio_latch; int result; u8 i; result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, CP210X_GET_PORTCONFIG, &config, sizeof(config)); if (result < 0) return result; priv->gc.ngpio = 16; priv->gpio_pushpull = le16_to_cpu(config.reset_state.gpio_mode_pb1); gpio_latch = le16_to_cpu(config.reset_state.gpio_latch_pb1); /* * Mark all pins which are not in GPIO mode. * * Refer to table 9.1 "GPIO Mode alternate Functions" in the datasheet: * https://www.silabs.com/documents/public/data-sheets/cp2108-datasheet.pdf * * Alternate functions of GPIO0 to GPIO3 are determined by enhancedfxn_ifc[0], * and similarly for the other pins; enhancedfxn_ifc[1]: GPIO4 to GPIO7, * enhancedfxn_ifc[2]: GPIO8 to GPIO11, enhancedfxn_ifc[3]: GPIO12 to GPIO15. */ for (i = 0; i < 4; i++) { if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_TXLED) priv->gpio_altfunc |= BIT(i * 4); if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_RXLED) priv->gpio_altfunc |= BIT((i * 4) + 1); if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_RS485) priv->gpio_altfunc |= BIT((i * 4) + 2); if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_CLOCK) priv->gpio_altfunc |= BIT((i * 4) + 3); } /* * Like the CP2102N, the CP2108 also has no strict input and output pin * modes. Do the same input mode emulation as for the CP2102N. */ for (i = 0; i < priv->gc.ngpio; ++i) { /* * Set direction to "input" iff pin is open-drain and reset * value is 1. */ if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i))) priv->gpio_input |= BIT(i); } return 0; } static int cp2102n_gpioconf_init(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); const u16 config_size = 0x02a6; u8 gpio_rst_latch; u8 config_version; u8 gpio_pushpull; u8 *config_buf; u8 gpio_latch; u8 gpio_ctrl; int result; u8 i; /* * Retrieve device configuration from the device. * The array received contains all customization settings done at the * factory/manufacturer.
Format of the array is documented at the * time of writing at: * https://www.silabs.com/community/interface/knowledge-base.entry.html/2017/03/31/cp2102n_setconfig-xsfa */ config_buf = kmalloc(config_size, GFP_KERNEL); if (!config_buf) return -ENOMEM; result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, CP210X_READ_2NCONFIG, config_buf, config_size); if (result < 0) { kfree(config_buf); return result; } config_version = config_buf[CP210X_2NCONFIG_CONFIG_VERSION_IDX]; gpio_pushpull = config_buf[CP210X_2NCONFIG_GPIO_MODE_IDX]; gpio_ctrl = config_buf[CP210X_2NCONFIG_GPIO_CONTROL_IDX]; gpio_rst_latch = config_buf[CP210X_2NCONFIG_GPIO_RSTLATCH_IDX]; kfree(config_buf); /* Make sure this is a config format we understand. */ if (config_version != 0x01) return -ENOTSUPP; priv->gc.ngpio = 4; /* * Get default pin states after reset. Needed so we can determine * the direction of an open-drain pin. */ gpio_latch = (gpio_rst_latch >> 3) & 0x0f; /* 0 indicates open-drain mode, 1 is push-pull */ priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f; /* 0 indicates GPIO mode, 1 is alternate function */ if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) { /* QFN20 is special... */ if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE) /* GPIO 0 */ priv->gpio_altfunc |= BIT(0); if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */ priv->gpio_altfunc |= BIT(1); if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */ priv->gpio_altfunc |= BIT(2); if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */ priv->gpio_altfunc |= BIT(3); } else { priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f; } if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) { /* * For the QFN28 package, GPIO4-6 are controlled by * the low three bits of the mode/latch fields. * Contrary to the document linked above, the bits for * the SUSPEND pins are elsewhere. No alternate * function is available for these pins. */ priv->gc.ngpio = 7; gpio_latch |= (gpio_rst_latch & 7) << 4; priv->gpio_pushpull |= (gpio_pushpull & 7) << 4; } /* * The CP2102N does not strictly have input and output pin modes; * it only knows open-drain and push-pull modes, which are set at the * factory. An open-drain pin can function either as an * input or an output. We emulate input mode for open-drain pins * by making sure they are not driven low, and we do not allow * push-pull pins to be set as an input. */ for (i = 0; i < priv->gc.ngpio; ++i) { /* * Set direction to "input" iff pin is open-drain and reset * value is 1. */ if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i))) priv->gpio_input |= BIT(i); } return 0; } static int cp210x_gpio_init(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); int result; switch (priv->partnum) { case CP210X_PARTNUM_CP2104: result = cp2104_gpioconf_init(serial); break; case CP210X_PARTNUM_CP2105: result = cp2105_gpioconf_init(serial); break; case CP210X_PARTNUM_CP2108: /* * The GPIOs are not tied to any specific port so only register * once for interface 0.
*/ if (cp210x_interface_num(serial) != 0) return 0; result = cp2108_gpio_init(serial); break; case CP210X_PARTNUM_CP2102N_QFN28: case CP210X_PARTNUM_CP2102N_QFN24: case CP210X_PARTNUM_CP2102N_QFN20: result = cp2102n_gpioconf_init(serial); break; default: return 0; } if (result < 0) return result; priv->gc.label = "cp210x"; priv->gc.get_direction = cp210x_gpio_direction_get; priv->gc.direction_input = cp210x_gpio_direction_input; priv->gc.direction_output = cp210x_gpio_direction_output; priv->gc.get = cp210x_gpio_get; priv->gc.set = cp210x_gpio_set; priv->gc.set_config = cp210x_gpio_set_config; priv->gc.init_valid_mask = cp210x_gpio_init_valid_mask; priv->gc.owner = THIS_MODULE; priv->gc.parent = &serial->interface->dev; priv->gc.base = -1; priv->gc.can_sleep = true; result = gpiochip_add_data(&priv->gc, serial); if (!result) priv->gpio_registered = true; return result; } static void cp210x_gpio_remove(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); if (priv->gpio_registered) { gpiochip_remove(&priv->gc); priv->gpio_registered = false; } } #else static int cp210x_gpio_init(struct usb_serial *serial) { return 0; } static void cp210x_gpio_remove(struct usb_serial *serial) { /* Nothing to do */ } #endif static int cp210x_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv; port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); if (!port_priv) return -ENOMEM; port_priv->bInterfaceNumber = cp210x_interface_num(serial); mutex_init(&port_priv->mutex); usb_set_serial_port_data(port, port_priv); return 0; } static void cp210x_port_remove(struct usb_serial_port *port) { struct cp210x_port_private *port_priv; port_priv = usb_get_serial_port_data(port); kfree(port_priv); } static void cp210x_init_max_speed(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); bool use_actual_rate = false; speed_t min = 300; speed_t max; switch (priv->partnum) { case CP210X_PARTNUM_CP2101: max = 921600; break; case CP210X_PARTNUM_CP2102: case CP210X_PARTNUM_CP2103: max = 1000000; break; case CP210X_PARTNUM_CP2104: use_actual_rate = true; max = 2000000; break; case CP210X_PARTNUM_CP2108: max = 2000000; break; case CP210X_PARTNUM_CP2105: if (cp210x_interface_num(serial) == 0) { use_actual_rate = true; max = 2000000; /* ECI */ } else { min = 2400; max = 921600; /* SCI */ } break; case CP210X_PARTNUM_CP2102N_QFN28: case CP210X_PARTNUM_CP2102N_QFN24: case CP210X_PARTNUM_CP2102N_QFN20: use_actual_rate = true; max = 3000000; break; default: max = 2000000; break; } priv->min_speed = min; priv->max_speed = max; priv->use_actual_rate = use_actual_rate; } static void cp2102_determine_quirks(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); u8 *buf; int ret; buf = kmalloc(2, GFP_KERNEL); if (!buf) return; /* * Some (possibly counterfeit) CP2102 do not support event-insertion * mode and respond differently to malformed vendor requests. * Specifically, they return one instead of two bytes when sent a * two-byte part-number request. 
*/ ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST, CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT); if (ret == 1) { dev_dbg(&serial->interface->dev, "device does not support event-insertion mode\n"); priv->no_event_mode = true; } kfree(buf); } static int cp210x_get_fw_version(struct usb_serial *serial, u16 value) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); u8 ver[3]; int ret; ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value, ver, sizeof(ver)); if (ret) return ret; dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__, ver[0], ver[1], ver[2]); priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2]; return 0; } static void cp210x_determine_type(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); int ret; ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, CP210X_GET_PARTNUM, &priv->partnum, sizeof(priv->partnum)); if (ret < 0) { dev_warn(&serial->interface->dev, "querying part number failed\n"); priv->partnum = CP210X_PARTNUM_UNKNOWN; return; } dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum); switch (priv->partnum) { case CP210X_PARTNUM_CP2102: cp2102_determine_quirks(serial); break; case CP210X_PARTNUM_CP2105: case CP210X_PARTNUM_CP2108: cp210x_get_fw_version(serial, CP210X_GET_FW_VER); break; case CP210X_PARTNUM_CP2102N_QFN28: case CP210X_PARTNUM_CP2102N_QFN24: case CP210X_PARTNUM_CP2102N_QFN20: ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N); if (ret) break; if (priv->fw_version <= 0x10004) priv->no_flow_control = true; break; default: break; } } static int cp210x_attach(struct usb_serial *serial) { int result; struct cp210x_serial_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; usb_set_serial_data(serial, priv); cp210x_determine_type(serial); cp210x_init_max_speed(serial); result = cp210x_gpio_init(serial); if (result < 0) { dev_err(&serial->interface->dev, "GPIO initialisation failed: %d\n", result); } return 0; } static void cp210x_disconnect(struct usb_serial *serial) { cp210x_gpio_remove(serial); } static void cp210x_release(struct usb_serial *serial) { struct cp210x_serial_private *priv = usb_get_serial_data(serial); cp210x_gpio_remove(serial); kfree(priv); } module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2");
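When the driver is built with CONFIG_GPIOLIB, the gpio_chip registered in cp210x_gpio_init() appears to user space as an ordinary GPIO character device, so the latch can be driven with standard tooling. A minimal, hedged usage sketch against the libgpiod v1 API follows; the chip name "gpiochip0", the line offset and the consumer string are placeholders (the real chip name depends on the system and can be found with gpiodetect), and push-pull pins can only be requested as outputs, as the direction callbacks above enforce.

/* Hypothetical userspace sketch (libgpiod v1 assumed); build with -lgpiod. */
#include <gpiod.h>
#include <stdio.h>

int main(void)
{
	struct gpiod_chip *chip;
	struct gpiod_line *line;

	chip = gpiod_chip_open_by_name("gpiochip0");	/* placeholder name */
	if (!chip) {
		perror("gpiod_chip_open_by_name");
		return 1;
	}

	line = gpiod_chip_get_line(chip, 0);		/* CP210x GPIO offset 0 */
	if (!line) {
		perror("gpiod_chip_get_line");
		gpiod_chip_close(chip);
		return 1;
	}

	if (gpiod_line_request_output(line, "cp210x-demo", 0) < 0) {
		perror("gpiod_line_request_output");
		gpiod_chip_close(chip);
		return 1;
	}

	gpiod_line_set_value(line, 1);			/* drive the pin high */
	gpiod_chip_close(chip);
	return 0;
}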
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI sockets. */ #include <linux/compat.h> #include <linux/export.h> #include <linux/utsname.h> #include <linux/sched.h> #include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_mon.h> #include <net/bluetooth/mgmt.h> #include "mgmt_util.h" static LIST_HEAD(mgmt_chan_list); static DEFINE_MUTEX(mgmt_chan_list_lock); static DEFINE_IDA(sock_cookie_ida); static atomic_t monitor_promisc = ATOMIC_INIT(0); /* ----- HCI socket interface ----- */ /* Socket info */ #define hci_pi(sk) ((struct hci_pinfo *) sk) struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; __u8 cmsg_mask; unsigned short channel; unsigned long flags; __u32 cookie; char comm[TASK_COMM_LEN]; __u16 mtu; }; static struct hci_dev *hci_hdev_from_sock(struct sock *sk) { struct hci_dev *hdev = hci_pi(sk)->hdev; if (!hdev) return ERR_PTR(-EBADFD); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) return ERR_PTR(-EPIPE); return hdev; } void hci_sock_set_flag(struct sock *sk, int nr) { set_bit(nr, &hci_pi(sk)->flags); } void hci_sock_clear_flag(struct sock *sk, int nr) { clear_bit(nr, &hci_pi(sk)->flags); } int hci_sock_test_flag(struct sock *sk, int nr) { return test_bit(nr, &hci_pi(sk)->flags); } unsigned short hci_sock_get_channel(struct sock *sk) { return hci_pi(sk)->channel; } u32 hci_sock_get_cookie(struct sock *sk) { return hci_pi(sk)->cookie; } static bool hci_sock_gen_cookie(struct sock *sk) { int id = hci_pi(sk)->cookie; if (!id) { id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL); if (id < 0) id = 0xffffffff; hci_pi(sk)->cookie = id; get_task_comm(hci_pi(sk)->comm, current); return true; } return false; } static void hci_sock_free_cookie(struct sock *sk) { int id = hci_pi(sk)->cookie; if (id) { hci_pi(sk)->cookie = 0xffffffff; ida_free(&sock_cookie_ida, id); } } static inline int hci_test_bit(int nr, const void *addr) { return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); } /* Security filter */ #define HCI_SFLT_MAX_OGF 5 struct hci_sec_filter { __u32 type_mask; __u32 event_mask[2]; __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; }; static const struct hci_sec_filter hci_sec_filter = { /* Packet types */ 0x10, /* Events */ { 0x1000d9fe, 0x0000b00c }, /* Commands */ { { 0x0 }, /* OGF_LINK_CTL */ { 0xbe000006, 0x00000001, 0x00000000, 0x00 }, /* OGF_LINK_POLICY */ { 0x00005200, 0x00000000, 0x00000000, 0x00 }, /* OGF_HOST_CTL */ { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 }, /* OGF_INFO_PARAM */ { 0x000002be, 0x00000000, 0x00000000, 0x00 }, /* OGF_STATUS_PARAM */ { 0x000000ea, 0x00000000, 0x00000000, 0x00 } } }; static struct bt_sock_list hci_sk_list = { .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock) }; static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) { struct hci_filter *flt; int flt_type, flt_event; /* Apply filter */ flt = &hci_pi(sk)->filter; flt_type = hci_skb_pkt_type(skb) & 
HCI_FLT_TYPE_BITS; if (!test_bit(flt_type, &flt->type_mask)) return true; /* Extra filter for event packets only */ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT) return false; flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); if (!hci_test_bit(flt_event, &flt->event_mask)) return true; /* Check filter only when opcode is set */ if (!flt->opcode) return false; if (flt_event == HCI_EV_CMD_COMPLETE && flt->opcode != get_unaligned((__le16 *)(skb->data + 3))) return true; if (flt_event == HCI_EV_CMD_STATUS && flt->opcode != get_unaligned((__le16 *)(skb->data + 4))) return true; return false; } /* Send frame to RAW socket */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) { struct sock *sk; struct sk_buff *skb_copy = NULL; BT_DBG("hdev %p len %d", hdev, skb->len); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) continue; /* Don't send frame to the socket it came from */ if (skb->sk == sk) continue; if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && hci_skb_pkt_type(skb) != HCI_EVENT_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) continue; if (is_filtered_packet(sk, skb)) continue; } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { if (!bt_cb(skb)->incoming) continue; if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) continue; } else { /* Don't send frame to other channel types */ continue; } if (!skb_copy) { /* Create a private copy with headroom */ skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true); if (!skb_copy) continue; /* Put type byte before the data */ memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&hci_sk_list.lock); kfree_skb(skb_copy); } static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb) { struct scm_creds *creds; if (!sk || WARN_ON(!skb)) return; creds = &bt_cb(skb)->creds; /* Check if peer credentials is set */ if (!sk->sk_peer_pid) { /* Check if parent peer credentials is set */ if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid) sk = bt_sk(sk)->parent; else return; } /* Check if scm_creds already set */ if (creds->pid == pid_vnr(sk->sk_peer_pid)) return; memset(creds, 0, sizeof(*creds)); creds->pid = pid_vnr(sk->sk_peer_pid); if (sk->sk_peer_cred) { creds->uid = sk->sk_peer_cred->uid; creds->gid = sk->sk_peer_cred->gid; } } static struct sk_buff *hci_skb_clone(struct sk_buff *skb) { struct sk_buff *nskb; if (!skb) return NULL; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return NULL; hci_sock_copy_creds(skb->sk, nskb); return nskb; } /* Send frame to sockets with specific channel */ static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk) { struct sock *sk; BT_DBG("channel %u len %d", channel, skb->len); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; /* Ignore socket without the flag set */ if (!hci_sock_test_flag(sk, flag)) continue; /* Skip the original socket */ if (sk == skip_sk) continue; if (sk->sk_state != BT_BOUND) continue; if (hci_pi(sk)->channel != channel) continue; nskb = hci_skb_clone(skb); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } 
} void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk) { read_lock(&hci_sk_list.lock); __hci_send_to_channel(channel, skb, flag, skip_sk); read_unlock(&hci_sk_list.lock); } /* Send frame to monitor socket */ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) { struct sk_buff *skb_copy = NULL; struct hci_mon_hdr *hdr; __le16 opcode; if (!atomic_read(&monitor_promisc)) return; BT_DBG("hdev %p len %d", hdev, skb->len); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: opcode = cpu_to_le16(HCI_MON_COMMAND_PKT); break; case HCI_EVENT_PKT: opcode = cpu_to_le16(HCI_MON_EVENT_PKT); break; case HCI_ACLDATA_PKT: if (bt_cb(skb)->incoming) opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT); else opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT); break; case HCI_SCODATA_PKT: if (bt_cb(skb)->incoming) opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT); else opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT); break; case HCI_ISODATA_PKT: if (bt_cb(skb)->incoming) opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT); else opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT); break; case HCI_DIAG_PKT: opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); break; default: return; } /* Create a private copy with headroom */ skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true); if (!skb_copy) return; hci_sock_copy_creds(skb->sk, skb_copy); /* Put header before the data */ hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE); hdr->opcode = opcode; hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len); hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb_copy); } void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, void *data, u16 data_len, ktime_t tstamp, int flag, struct sock *skip_sk) { struct sock *sk; __le16 index; if (hdev) index = cpu_to_le16(hdev->id); else index = cpu_to_le16(MGMT_INDEX_NONE); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct hci_mon_hdr *hdr; struct sk_buff *skb; if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL) continue; /* Ignore socket without the flag set */ if (!hci_sock_test_flag(sk, flag)) continue; /* Skip the original socket */ if (sk == skip_sk) continue; skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC); if (!skb) continue; put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(event, skb_put(skb, 2)); if (data) skb_put_data(skb, data, data_len); skb->tstamp = tstamp; hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT); hdr->index = index; hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } read_unlock(&hci_sk_list.lock); } static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) { struct hci_mon_hdr *hdr; struct hci_mon_new_index *ni; struct hci_mon_index_info *ii; struct sk_buff *skb; __le16 opcode; switch (event) { case HCI_DEV_REG: skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC); if (!skb) return NULL; ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE); ni->type = 0x00; /* Old hdev->dev_type */ ni->bus = hdev->bus; bacpy(&ni->bdaddr, &hdev->bdaddr); memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name, strnlen(hdev->name, sizeof(ni->name)), '\0'); opcode = cpu_to_le16(HCI_MON_NEW_INDEX); break; case HCI_DEV_UNREG: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = cpu_to_le16(HCI_MON_DEL_INDEX); break; case HCI_DEV_SETUP: if (hdev->manufacturer == 0xffff) return NULL; fallthrough; case HCI_DEV_UP: skb = 
bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); if (!skb) return NULL; ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE); bacpy(&ii->bdaddr, &hdev->bdaddr); ii->manufacturer = cpu_to_le16(hdev->manufacturer); opcode = cpu_to_le16(HCI_MON_INDEX_INFO); break; case HCI_DEV_OPEN: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = cpu_to_le16(HCI_MON_OPEN_INDEX); break; case HCI_DEV_CLOSE: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX); break; default: return NULL; } __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = opcode; hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static struct sk_buff *create_monitor_ctrl_open(struct sock *sk) { struct hci_mon_hdr *hdr; struct sk_buff *skb; u16 format; u8 ver[3]; u32 flags; /* No message needed when cookie is not present */ if (!hci_pi(sk)->cookie) return NULL; switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: format = 0x0000; ver[0] = BT_SUBSYS_VERSION; put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1); break; case HCI_CHANNEL_USER: format = 0x0001; ver[0] = BT_SUBSYS_VERSION; put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1); break; case HCI_CHANNEL_CONTROL: format = 0x0002; mgmt_fill_version_info(ver); break; default: /* No message for unsupported format */ return NULL; } skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC); if (!skb) return NULL; hci_sock_copy_creds(sk, skb); flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0; put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(format, skb_put(skb, 2)); skb_put_data(skb, ver, sizeof(ver)); put_unaligned_le32(flags, skb_put(skb, 4)); skb_put_u8(skb, TASK_COMM_LEN); skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN); if (hci_pi(sk)->hdev) hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id); else hdr->index = cpu_to_le16(HCI_DEV_NONE); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static struct sk_buff *create_monitor_ctrl_close(struct sock *sk) { struct hci_mon_hdr *hdr; struct sk_buff *skb; /* No message needed when cookie is not present */ if (!hci_pi(sk)->cookie) return NULL; switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: case HCI_CHANNEL_CONTROL: break; default: /* No message for unsupported format */ return NULL; } skb = bt_skb_alloc(4, GFP_ATOMIC); if (!skb) return NULL; hci_sock_copy_creds(sk, skb); put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE); if (hci_pi(sk)->hdev) hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id); else hdr->index = cpu_to_le16(HCI_DEV_NONE); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index, u16 opcode, u16 len, const void *buf) { struct hci_mon_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(6 + len, GFP_ATOMIC); if (!skb) return NULL; hci_sock_copy_creds(sk, skb); put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(opcode, skb_put(skb, 2)); if (buf) skb_put_data(skb, buf, len); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND); hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static void __printf(2, 3) 
send_monitor_note(struct sock *sk, const char *fmt, ...) { size_t len; struct hci_mon_hdr *hdr; struct sk_buff *skb; va_list args; va_start(args, fmt); len = vsnprintf(NULL, 0, fmt, args); va_end(args); skb = bt_skb_alloc(len + 1, GFP_ATOMIC); if (!skb) return; hci_sock_copy_creds(sk, skb); va_start(args, fmt); vsprintf(skb_put(skb, len), fmt, args); *(u8 *)skb_put(skb, 1) = 0; va_end(args); __net_timestamp(skb); hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE); hdr->index = cpu_to_le16(HCI_DEV_NONE); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); } static void send_monitor_replay(struct sock *sk) { struct hci_dev *hdev; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { struct sk_buff *skb; skb = create_monitor_event(hdev, HCI_DEV_REG); if (!skb) continue; if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); if (!test_bit(HCI_RUNNING, &hdev->flags)) continue; skb = create_monitor_event(hdev, HCI_DEV_OPEN); if (!skb) continue; if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); if (test_bit(HCI_UP, &hdev->flags)) skb = create_monitor_event(hdev, HCI_DEV_UP); else if (hci_dev_test_flag(hdev, HCI_SETUP)) skb = create_monitor_event(hdev, HCI_DEV_SETUP); else skb = NULL; if (skb) { if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); } } read_unlock(&hci_dev_list_lock); } static void send_monitor_control_replay(struct sock *mon_sk) { struct sock *sk; read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *skb; skb = create_monitor_ctrl_open(sk); if (!skb) continue; if (sock_queue_rcv_skb(mon_sk, skb)) kfree_skb(skb); } read_unlock(&hci_sk_list.lock); } /* Generate internal stack event */ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) { struct hci_event_hdr *hdr; struct hci_ev_stack_internal *ev; struct sk_buff *skb; skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC); if (!skb) return; hdr = skb_put(skb, HCI_EVENT_HDR_SIZE); hdr->evt = HCI_EV_STACK_INTERNAL; hdr->plen = sizeof(*ev) + dlen; ev = skb_put(skb, sizeof(*ev) + dlen); ev->type = type; memcpy(ev->data, data, dlen); bt_cb(skb)->incoming = 1; __net_timestamp(skb); hci_skb_pkt_type(skb) = HCI_EVENT_PKT; hci_send_to_sock(hdev, skb); kfree_skb(skb); } void hci_sock_dev_event(struct hci_dev *hdev, int event) { BT_DBG("hdev %s event %d", hdev->name, event); if (atomic_read(&monitor_promisc)) { struct sk_buff *skb; /* Send event to monitor */ skb = create_monitor_event(hdev, event); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } if (event <= HCI_DEV_DOWN) { struct hci_ev_si_device ev; /* Send event to sockets */ ev.event = event; ev.dev_id = hdev->id; hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); } if (event == HCI_DEV_UNREG) { struct sock *sk; /* Wake up sockets using this dead device */ read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { if (hci_pi(sk)->hdev == hdev) { sk->sk_err = EPIPE; sk->sk_state_change(sk); } } read_unlock(&hci_sk_list.lock); } } static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel) { struct hci_mgmt_chan *c; list_for_each_entry(c, &mgmt_chan_list, list) { if (c->channel == channel) return c; } return NULL; } static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel) { struct hci_mgmt_chan *c; mutex_lock(&mgmt_chan_list_lock); c = __hci_mgmt_chan_find(channel); mutex_unlock(&mgmt_chan_list_lock); return c; } int 
hci_mgmt_chan_register(struct hci_mgmt_chan *c) { if (c->channel < HCI_CHANNEL_CONTROL) return -EINVAL; mutex_lock(&mgmt_chan_list_lock); if (__hci_mgmt_chan_find(c->channel)) { mutex_unlock(&mgmt_chan_list_lock); return -EALREADY; } list_add_tail(&c->list, &mgmt_chan_list); mutex_unlock(&mgmt_chan_list_lock); return 0; } EXPORT_SYMBOL(hci_mgmt_chan_register); void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c) { mutex_lock(&mgmt_chan_list_lock); list_del(&c->list); mutex_unlock(&mgmt_chan_list_lock); } EXPORT_SYMBOL(hci_mgmt_chan_unregister); static int hci_sock_release(struct socket *sock) { struct sock *sk = sock->sk; struct hci_dev *hdev; struct sk_buff *skb; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_MONITOR: atomic_dec(&monitor_promisc); break; case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: case HCI_CHANNEL_CONTROL: /* Send event to monitor */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } hci_sock_free_cookie(sk); break; } bt_sock_unlink(&hci_sk_list, sk); hdev = hci_pi(sk)->hdev; if (hdev) { if (hci_pi(sk)->channel == HCI_CHANNEL_USER && !hci_dev_test_flag(hdev, HCI_UNREGISTER)) { /* When releasing a user channel exclusive access, * call hci_dev_do_close directly instead of calling * hci_dev_close to ensure the exclusive access will * be released and the controller brought back down. * * The checking of HCI_AUTO_OFF is not needed in this * case since it will have been cleared already when * opening the user channel. * * Make sure to also check that we haven't already * unregistered since all the cleanup will have already * been complete and hdev will get released when we put * below. 
*/ hci_dev_do_close(hdev); hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); mgmt_index_added(hdev); } atomic_dec(&hdev->promisc); hci_dev_put(hdev); } sock_orphan(sk); release_sock(sk); sock_put(sk); return 0; } static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg) { bdaddr_t bdaddr; int err; if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; hci_dev_lock(hdev); err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR); hci_dev_unlock(hdev); return err; } static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg) { bdaddr_t bdaddr; int err; if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; hci_dev_lock(hdev); err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR); hci_dev_unlock(hdev); return err; } /* Ioctls that require bound socket */ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { struct hci_dev *hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) return PTR_ERR(hdev); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return -EOPNOTSUPP; switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) return -EPERM; return -EOPNOTSUPP; case HCIGETCONNINFO: return hci_get_conn_info(hdev, (void __user *)arg); case HCIGETAUTHINFO: return hci_get_auth_info(hdev, (void __user *)arg); case HCIBLOCKADDR: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_sock_reject_list_add(hdev, (void __user *)arg); case HCIUNBLOCKADDR: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_sock_reject_list_del(hdev, (void __user *)arg); } return -ENOIOCTLCMD; } static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct sock *sk = sock->sk; int err; BT_DBG("cmd %x arg %lx", cmd, arg); /* Make sure the cmd is valid before doing anything */ switch (cmd) { case HCIGETDEVLIST: case HCIGETDEVINFO: case HCIGETCONNLIST: case HCIDEVUP: case HCIDEVDOWN: case HCIDEVRESET: case HCIDEVRESTAT: case HCISETSCAN: case HCISETAUTH: case HCISETENCRYPT: case HCISETPTYPE: case HCISETLINKPOL: case HCISETLINKMODE: case HCISETACLMTU: case HCISETSCOMTU: case HCIINQUIRY: case HCISETRAW: case HCIGETCONNINFO: case HCIGETAUTHINFO: case HCIBLOCKADDR: case HCIUNBLOCKADDR: break; default: return -ENOIOCTLCMD; } lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EBADFD; goto done; } /* When calling an ioctl on an unbound raw socket, then ensure * that the monitor gets informed. Ensure that the resulting event * is only send once by checking if the cookie exists or not. The * socket cookie will be only ever generated once for the lifetime * of a given socket. */ if (hci_sock_gen_cookie(sk)) { struct sk_buff *skb; /* Perform careful checks before setting the HCI_SOCK_TRUSTED * flag. Make sure that not only the current task but also * the socket opener has the required capability, since * privileged programs can be tricked into making ioctl calls * on HCI sockets, and the socket should not be marked as * trusted simply because the ioctl caller is privileged. 
*/ if (sk_capable(sk, CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } release_sock(sk); switch (cmd) { case HCIGETDEVLIST: return hci_get_dev_list(argp); case HCIGETDEVINFO: return hci_get_dev_info(argp); case HCIGETCONNLIST: return hci_get_conn_list(argp); case HCIDEVUP: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_open(arg); case HCIDEVDOWN: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_close(arg); case HCIDEVRESET: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_reset(arg); case HCIDEVRESTAT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_reset_stat(arg); case HCISETSCAN: case HCISETAUTH: case HCISETENCRYPT: case HCISETPTYPE: case HCISETLINKPOL: case HCISETLINKMODE: case HCISETACLMTU: case HCISETSCOMTU: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_cmd(cmd, argp); case HCIINQUIRY: return hci_inquiry(argp); } lock_sock(sk); err = hci_sock_bound_ioctl(sk, cmd, arg); done: release_sock(sk); return err; } #ifdef CONFIG_COMPAT static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { switch (cmd) { case HCIDEVUP: case HCIDEVDOWN: case HCIDEVRESET: case HCIDEVRESTAT: return hci_sock_ioctl(sock, cmd, arg); } return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); } #endif static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_hci haddr; struct sock *sk = sock->sk; struct hci_dev *hdev = NULL; struct sk_buff *skb; int len, err = 0; BT_DBG("sock %p sk %p", sock, sk); if (!addr) return -EINVAL; memset(&haddr, 0, sizeof(haddr)); len = min_t(unsigned int, sizeof(haddr), addr_len); memcpy(&haddr, addr, len); if (haddr.hci_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); /* Allow detaching from dead device and attaching to alive device, if * the caller wants to re-bind (instead of close) this socket in * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. */ hdev = hci_pi(sk)->hdev; if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { hci_pi(sk)->hdev = NULL; sk->sk_state = BT_OPEN; hci_dev_put(hdev); } hdev = NULL; if (sk->sk_state == BT_BOUND) { err = -EALREADY; goto done; } switch (haddr.hci_channel) { case HCI_CHANNEL_RAW: if (hci_pi(sk)->hdev) { err = -EALREADY; goto done; } if (haddr.hci_dev != HCI_DEV_NONE) { hdev = hci_dev_get(haddr.hci_dev); if (!hdev) { err = -ENODEV; goto done; } atomic_inc(&hdev->promisc); } hci_pi(sk)->channel = haddr.hci_channel; if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been assigned, * then there has been already an ioctl issued against * an unbound socket and with that triggered an open * notification. Send a close notification first to * allow the state transition to bounded. 
*/ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } if (capable(CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_pi(sk)->hdev = hdev; /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } break; case HCI_CHANNEL_USER: if (hci_pi(sk)->hdev) { err = -EALREADY; goto done; } if (haddr.hci_dev == HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_ADMIN)) { err = -EPERM; goto done; } hdev = hci_dev_get(haddr.hci_dev); if (!hdev) { err = -ENODEV; goto done; } if (test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && test_bit(HCI_UP, &hdev->flags))) { err = -EBUSY; hci_dev_put(hdev); goto done; } if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) { err = -EUSERS; hci_dev_put(hdev); goto done; } mgmt_index_removed(hdev); err = hci_dev_open(hdev->id); if (err) { if (err == -EALREADY) { /* In case the transport is already up and * running, clear the error here. * * This can happen when opening a user * channel and HCI_AUTO_OFF grace period * is still active. */ err = 0; } else { hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); mgmt_index_added(hdev); hci_dev_put(hdev); goto done; } } hci_pi(sk)->channel = haddr.hci_channel; if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been assigned, * this socket will transition from a raw socket into * a user channel socket. For a clean transition, send * the close notification first. */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } /* The user channel is restricted to CAP_NET_ADMIN * capabilities and with that implicitly trusted. */ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_pi(sk)->hdev = hdev; /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } atomic_inc(&hdev->promisc); break; case HCI_CHANNEL_MONITOR: if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_RAW)) { err = -EPERM; goto done; } hci_pi(sk)->channel = haddr.hci_channel; /* The monitor interface is restricted to CAP_NET_RAW * capabilities and with that implicitly trusted. */ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); send_monitor_note(sk, "Linux version %s (%s)", init_utsname()->release, init_utsname()->machine); send_monitor_note(sk, "Bluetooth subsystem version %u.%u", BT_SUBSYS_VERSION, BT_SUBSYS_REVISION); send_monitor_replay(sk); send_monitor_control_replay(sk); atomic_inc(&monitor_promisc); break; case HCI_CHANNEL_LOGGING: if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_ADMIN)) { err = -EPERM; goto done; } hci_pi(sk)->channel = haddr.hci_channel; break; default: if (!hci_mgmt_chan_find(haddr.hci_channel)) { err = -EINVAL; goto done; } if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } /* Users with CAP_NET_ADMIN capabilities are allowed * access to all management commands and events. For * untrusted users the interface is restricted and * also only untrusted events are sent. 
*/ if (capable(CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_pi(sk)->channel = haddr.hci_channel; /* At the moment the index and unconfigured index events * are enabled unconditionally. Setting them on each * socket when binding keeps this functionality. They * however might be cleared later and then sending of these * events will be disabled, but that is then intentional. * * This also enables generic events that are safe to be * received by untrusted users. Example for such events * are changes to settings, class of device, name etc. */ if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) { if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been * assigned, this socket will transition from * a raw socket into a control socket. To * allow for a clean transition, send the * close notification first. */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS); } break; } /* Default MTU to HCI_MAX_FRAME_SIZE if not set */ if (!hci_pi(sk)->mtu) hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE; sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr; struct sock *sk = sock->sk; struct hci_dev *hdev; int err = 0; BT_DBG("sock %p sk %p", sock, sk); if (peer) return -EOPNOTSUPP; lock_sock(sk); hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) { err = PTR_ERR(hdev); goto done; } haddr->hci_family = AF_BLUETOOTH; haddr->hci_dev = hdev->id; haddr->hci_channel= hci_pi(sk)->channel; err = sizeof(*haddr); done: release_sock(sk); return err; } static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { __u8 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); } if (mask & HCI_CMSG_TSTAMP) { #ifdef CONFIG_COMPAT struct old_timeval32 ctv; #endif struct __kernel_old_timeval tv; void *data; int len; skb_get_timestamp(skb, &tv); data = &tv; len = sizeof(tv); #ifdef CONFIG_COMPAT if (!COMPAT_USE_64BIT_TIME && (msg->msg_flags & MSG_CMSG_COMPAT)) { ctv.tv_sec = tv.tv_sec; ctv.tv_usec = tv.tv_usec; data = &ctv; len = sizeof(ctv); } #endif put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data); } } static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct scm_cookie scm; struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; unsigned int skblen; BT_DBG("sock %p, sk %p", sock, sk); if (flags & MSG_OOB) return -EOPNOTSUPP; if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING) return -EOPNOTSUPP; if (sk->sk_state == BT_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; skblen = skb->len; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_msg(skb, 0, msg, copied); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: 
hci_sock_cmsg(sk, msg, skb); break; case HCI_CHANNEL_USER: case HCI_CHANNEL_MONITOR: sock_recv_timestamp(msg, sk, skb); break; default: if (hci_mgmt_chan_find(hci_pi(sk)->channel)) sock_recv_timestamp(msg, sk, skb); break; } memset(&scm, 0, sizeof(scm)); scm.creds = bt_cb(skb)->creds; skb_free_datagram(sk, skb); if (flags & MSG_TRUNC) copied = skblen; scm_recv(sock, msg, &scm, flags); return err ? : copied; } static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, struct sk_buff *skb) { u8 *cp; struct mgmt_hdr *hdr; u16 opcode, index, len; struct hci_dev *hdev = NULL; const struct hci_mgmt_handler *handler; bool var_len, no_hdev; int err; BT_DBG("got %d bytes", skb->len); if (skb->len < sizeof(*hdr)) return -EINVAL; hdr = (void *)skb->data; opcode = __le16_to_cpu(hdr->opcode); index = __le16_to_cpu(hdr->index); len = __le16_to_cpu(hdr->len); if (len != skb->len - sizeof(*hdr)) { err = -EINVAL; goto done; } if (chan->channel == HCI_CHANNEL_CONTROL) { struct sk_buff *cmd; /* Send event to monitor */ cmd = create_monitor_ctrl_command(sk, index, opcode, len, skb->data + sizeof(*hdr)); if (cmd) { hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd, HCI_SOCK_TRUSTED, NULL); kfree_skb(cmd); } } if (opcode >= chan->handler_count || chan->handlers[opcode].func == NULL) { BT_DBG("Unknown op %u", opcode); err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_UNKNOWN_COMMAND); goto done; } handler = &chan->handlers[opcode]; if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) && !(handler->flags & HCI_MGMT_UNTRUSTED)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_PERMISSION_DENIED); goto done; } if (index != MGMT_INDEX_NONE) { hdev = hci_dev_get(index); if (!hdev) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } if (hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !(handler->flags & HCI_MGMT_UNCONFIGURED)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } } if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) { no_hdev = (handler->flags & HCI_MGMT_NO_HDEV); if (no_hdev != !hdev) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } } var_len = (handler->flags & HCI_MGMT_VAR_LEN); if ((var_len && len < handler->data_len) || (!var_len && len != handler->data_len)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_PARAMS); goto done; } if (hdev && chan->hdev_init) chan->hdev_init(sk, hdev); cp = skb->data + sizeof(*hdr); err = handler->func(sk, hdev, cp, len); if (err < 0) goto done; err = skb->len; done: if (hdev) hci_dev_put(hdev); return err; } static int hci_logging_frame(struct sock *sk, struct sk_buff *skb, unsigned int flags) { struct hci_mon_hdr *hdr; struct hci_dev *hdev; u16 index; int err; /* The logging frame consists at minimum of the standard header, * the priority byte, the ident length byte and at least one string * terminator NUL byte. Anything shorter are invalid packets. */ if (skb->len < sizeof(*hdr) + 3) return -EINVAL; hdr = (void *)skb->data; if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr)) return -EINVAL; if (__le16_to_cpu(hdr->opcode) == 0x0000) { __u8 priority = skb->data[sizeof(*hdr)]; __u8 ident_len = skb->data[sizeof(*hdr) + 1]; /* Only the priorities 0-7 are valid and with that any other * value results in an invalid packet. 
* * The priority byte is followed by an ident length byte and * the NUL terminated ident string. Check that the ident * length is not overflowing the packet and also that the * ident string itself is NUL terminated. In case the ident * length is zero, the length value actually doubles as NUL * terminator identifier. * * The message follows the ident string (if present) and * must be NUL terminated. Otherwise it is not a valid packet. */ if (priority > 7 || skb->data[skb->len - 1] != 0x00 || ident_len > skb->len - sizeof(*hdr) - 3 || skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) return -EINVAL; } else { return -EINVAL; } index = __le16_to_cpu(hdr->index); if (index != MGMT_INDEX_NONE) { hdev = hci_dev_get(index); if (!hdev) return -ENODEV; } else { hdev = NULL; } hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING); hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); err = skb->len; if (hdev) hci_dev_put(hdev); return err; } static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct hci_mgmt_chan *chan; struct hci_dev *hdev; struct sk_buff *skb; int err; const unsigned int flags = msg->msg_flags; BT_DBG("sock %p sk %p", sock, sk); if (flags & MSG_OOB) return -EOPNOTSUPP; if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT)) return -EINVAL; if (len < 4 || len > hci_pi(sk)->mtu) return -EINVAL; skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); if (IS_ERR(skb)) return PTR_ERR(skb); lock_sock(sk); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: break; case HCI_CHANNEL_MONITOR: err = -EOPNOTSUPP; goto drop; case HCI_CHANNEL_LOGGING: err = hci_logging_frame(sk, skb, flags); goto drop; default: mutex_lock(&mgmt_chan_list_lock); chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); if (chan) err = hci_mgmt_cmd(chan, sk, skb); else err = -EINVAL; mutex_unlock(&mgmt_chan_list_lock); goto drop; } hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) { err = PTR_ERR(hdev); goto drop; } if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; goto drop; } hci_skb_pkt_type(skb) = skb->data[0]; skb_pull(skb, 1); if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { /* No permission check is needed for user channel * since that gets enforced when binding the socket. * * However check that the packet type is valid. */ if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { err = -EINVAL; goto drop; } skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) { u16 opcode = get_unaligned_le16(skb->data); u16 ogf = hci_opcode_ogf(opcode); u16 ocf = hci_opcode_ocf(opcode); if (((ogf > HCI_SFLT_MAX_OGF) || !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && !capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } /* Since the opcode has already been extracted here, store * a copy of the value for later use by the drivers. */ hci_skb_opcode(skb) = opcode; if (ogf == 0x3f) { skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else { /* Stand-alone HCI commands must be flagged as * single-command requests. 
*/ bt_cb(skb)->hci.req_flags |= HCI_REQ_START; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } } else { if (!capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { err = -EINVAL; goto drop; } skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } err = len; done: release_sock(sk); return err; drop: kfree_skb(skb); goto done; } static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; int err = 0, opt = 0; BT_DBG("sk %p, opt %d", sk, optname); lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EBADFD; goto done; } switch (optname) { case HCI_DATA_DIR: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; else hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; break; case HCI_TIME_STAMP: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; else hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen); if (err) break; if (!capable(CAP_NET_RAW)) { uf.type_mask &= hci_sec_filter.type_mask; uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0); uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1); } { struct hci_filter *f = &hci_pi(sk)->filter; f->type_mask = uf.type_mask; f->opcode = uf.opcode; *((u32 *) f->event_mask + 0) = uf.event_mask[0]; *((u32 *) f->event_mask + 1) = uf.event_mask[1]; } break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; u16 opt; BT_DBG("sk %p, opt %d", sk, optname); if (level == SOL_HCI) return hci_sock_setsockopt_old(sock, level, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SNDMTU: case BT_RCVMTU: switch (hci_pi(sk)->channel) { /* Don't allow changing MTU for channels that are meant for HCI * traffic only. 
*/ case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: err = -ENOPROTOOPT; goto done; } err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; hci_pi(sk)->mtu = opt; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct hci_ufilter uf; struct sock *sk = sock->sk; int len, opt, err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EBADFD; goto done; } switch (optname) { case HCI_DATA_DIR: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_TIME_STAMP: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_to_user(optval, &uf, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (level == SOL_HCI) return hci_sock_getsockopt_old(sock, level, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SNDMTU: case BT_RCVMTU: if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static void hci_sock_destruct(struct sock *sk) { mgmt_cleanup(sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = hci_sock_release, .bind = hci_sock_bind, .getname = hci_sock_getname, .sendmsg = hci_sock_sendmsg, .recvmsg = hci_sock_recvmsg, .ioctl = hci_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = hci_sock_compat_ioctl, #endif .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = hci_sock_setsockopt, .getsockopt = hci_sock_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto hci_sk_proto = { .name = "HCI", .owner = THIS_MODULE, .obj_size = sizeof(struct hci_pinfo) }; static int hci_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &hci_sock_ops; sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sock->state = SS_UNCONNECTED; sk->sk_destruct = hci_sock_destruct; bt_sock_link(&hci_sk_list, sk); return 0; } static const struct net_proto_family hci_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = hci_sock_create, }; int __init hci_sock_init(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr)); err = proto_register(&hci_sk_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); if (err < 0) { 
BT_ERR("HCI socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create HCI proc file"); bt_sock_unregister(BTPROTO_HCI); goto error; } BT_INFO("HCI socket layer initialized"); return 0; error: proto_unregister(&hci_sk_proto); return err; } void hci_sock_cleanup(void) { bt_procfs_cleanup(&init_net, "hci"); bt_sock_unregister(BTPROTO_HCI); proto_unregister(&hci_sk_proto); }
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * This file holds USB constants and structures that are needed for * USB device APIs. These are used by the USB device model, which is * defined in chapter 9 of the USB 2.0 specification and in the * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that * need these: * * - the master/host side Linux-USB kernel driver API; * - the "usbfs" user space API; and * - the Linux "gadget" slave/device/peripheral side driver API. * * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems * act either as a USB master/host or as a USB slave/device. That means * the master and slave side APIs benefit from working well together. * * Note all descriptors are declared '__attribute__((packed))' so that: * * [a] they never get padded, either internally (USB spec writers * probably handled that) or externally; * * [b] so that accessing bigger-than-a-bytes fields will never * generate bus errors on any platform, even when the location of * its descriptor inside a bundle isn't "naturally aligned", and * * [c] for consistency, removing all doubt even when it appears to * someone that the two other points are non-issues for that * particular descriptor type. */ #ifndef _UAPI__LINUX_USB_CH9_H #define _UAPI__LINUX_USB_CH9_H #include <linux/types.h> /* __u8 etc */ #include <asm/byteorder.h> /* le16_to_cpu */ /*-------------------------------------------------------------------------*/ /* CONTROL REQUEST SUPPORT */ /* * USB directions * * This bit flag is used in endpoint descriptors' bEndpointAddress field. * It's also one of three fields in control requests bRequestType.
*/ #define USB_DIR_OUT 0 /* to device */ #define USB_DIR_IN 0x80 /* to host */ /* * USB types, the second of three bRequestType fields */ #define USB_TYPE_MASK (0x03 << 5) #define USB_TYPE_STANDARD (0x00 << 5) #define USB_TYPE_CLASS (0x01 << 5) #define USB_TYPE_VENDOR (0x02 << 5) #define USB_TYPE_RESERVED (0x03 << 5) /* * USB recipients, the third of three bRequestType fields */ #define USB_RECIP_MASK 0x1f #define USB_RECIP_DEVICE 0x00 #define USB_RECIP_INTERFACE 0x01 #define USB_RECIP_ENDPOINT 0x02 #define USB_RECIP_OTHER 0x03 /* From Wireless USB 1.0 */ #define USB_RECIP_PORT 0x04 #define USB_RECIP_RPIPE 0x05 /* * Standard requests, for the bRequest field of a SETUP packet. * * These are qualified by the bRequestType field, so that for example * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved * by a GET_STATUS request. */ #define USB_REQ_GET_STATUS 0x00 #define USB_REQ_CLEAR_FEATURE 0x01 #define USB_REQ_SET_FEATURE 0x03 #define USB_REQ_SET_ADDRESS 0x05 #define USB_REQ_GET_DESCRIPTOR 0x06 #define USB_REQ_SET_DESCRIPTOR 0x07 #define USB_REQ_GET_CONFIGURATION 0x08 #define USB_REQ_SET_CONFIGURATION 0x09 #define USB_REQ_GET_INTERFACE 0x0A #define USB_REQ_SET_INTERFACE 0x0B #define USB_REQ_SYNCH_FRAME 0x0C #define USB_REQ_SET_SEL 0x30 #define USB_REQ_SET_ISOCH_DELAY 0x31 #define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */ #define USB_REQ_GET_ENCRYPTION 0x0E #define USB_REQ_RPIPE_ABORT 0x0E #define USB_REQ_SET_HANDSHAKE 0x0F #define USB_REQ_RPIPE_RESET 0x0F #define USB_REQ_GET_HANDSHAKE 0x10 #define USB_REQ_SET_CONNECTION 0x11 #define USB_REQ_SET_SECURITY_DATA 0x12 #define USB_REQ_GET_SECURITY_DATA 0x13 #define USB_REQ_SET_WUSB_DATA 0x14 #define USB_REQ_LOOPBACK_DATA_WRITE 0x15 #define USB_REQ_LOOPBACK_DATA_READ 0x16 #define USB_REQ_SET_INTERFACE_DS 0x17 /* specific requests for USB Power Delivery */ #define USB_REQ_GET_PARTNER_PDO 20 #define USB_REQ_GET_BATTERY_STATUS 21 #define USB_REQ_SET_PDO 22 #define USB_REQ_GET_VDM 23 #define USB_REQ_SEND_VDM 24 /* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command, * used by hubs to put ports into a new L1 suspend state, except that it * forgot to define its number ... */ /* * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and * are read as a bit array returned by USB_REQ_GET_STATUS. (So there * are at most sixteen features of each type.) Hubs may also support a * new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend. 
*/ #define USB_DEVICE_SELF_POWERED 0 /* (read only) */ #define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */ #define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */ #define USB_DEVICE_BATTERY 2 /* (wireless) */ #define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */ #define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/ #define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */ #define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ #define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ /* * Test Mode Selectors * See USB 2.0 spec Table 9-7 */ #define USB_TEST_J 1 #define USB_TEST_K 2 #define USB_TEST_SE0_NAK 3 #define USB_TEST_PACKET 4 #define USB_TEST_FORCE_ENABLE 5 /* Status Type */ #define USB_STATUS_TYPE_STANDARD 0 #define USB_STATUS_TYPE_PTM 1 /* * New Feature Selectors as added by USB 3.0 * See USB 3.0 spec Table 9-7 */ #define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */ #define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */ #define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */ #define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */ #define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00 /* * Suspend Options, Table 9-8 USB 3.0 spec */ #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) /* * Interface status, Figure 9-5 USB 3.0 spec */ #define USB_INTRF_STAT_FUNC_RW_CAP 1 #define USB_INTRF_STAT_FUNC_RW 2 #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ #define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */ #define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */ #define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */ /* * Feature selectors from Table 9-8 USB Power Delivery spec */ #define USB_DEVICE_BATTERY_WAKE_MASK 40 #define USB_DEVICE_OS_IS_PD_AWARE 41 #define USB_DEVICE_POLICY_MODE 42 #define USB_PORT_PR_SWAP 43 #define USB_PORT_GOTO_MIN 44 #define USB_PORT_RETURN_POWER 45 #define USB_PORT_ACCEPT_PD_REQUEST 46 #define USB_PORT_REJECT_PD_REQUEST 47 #define USB_PORT_PORT_PD_RESET 48 #define USB_PORT_C_PORT_PD_CHANGE 49 #define USB_PORT_CABLE_PD_RESET 50 #define USB_DEVICE_CHARGING_POLICY 54 /** * struct usb_ctrlrequest - SETUP data for a USB device control request * @bRequestType: matches the USB bmRequestType field * @bRequest: matches the USB bRequest field * @wValue: matches the USB wValue field (le16 byte order) * @wIndex: matches the USB wIndex field (le16 byte order) * @wLength: matches the USB wLength field (le16 byte order) * * This structure is used to send control requests to a USB device. It matches * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the * USB spec for a fuller description of the different fields, and what they are * used for. * * Note that the driver for any interface can issue control requests. * For most devices, interfaces don't coordinate with each other, so * such requests may be made at any time. */ struct usb_ctrlrequest { __u8 bRequestType; __u8 bRequest; __le16 wValue; __le16 wIndex; __le16 wLength; } __attribute__ ((packed)); /*-------------------------------------------------------------------------*/ /* * STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or * (rarely) accepted by SET_DESCRIPTOR. * * Note that all multi-byte values here are encoded in little endian * byte order "on the wire". 
Within the kernel and when exposed * through the Linux-USB APIs, they are not converted to cpu byte * order; it is the responsibility of the client code to do this. * The single exception is when device and configuration descriptors (but * not other descriptors) are read from character devices * (i.e. /dev/bus/usb/BBB/DDD); * in this case the fields are converted to host endianness by the kernel. */ /* * Descriptor types ... USB 2.0 spec table 9.5 */ #define USB_DT_DEVICE 0x01 #define USB_DT_CONFIG 0x02 #define USB_DT_STRING 0x03 #define USB_DT_INTERFACE 0x04 #define USB_DT_ENDPOINT 0x05 #define USB_DT_DEVICE_QUALIFIER 0x06 #define USB_DT_OTHER_SPEED_CONFIG 0x07 #define USB_DT_INTERFACE_POWER 0x08 /* these are from a minor usb 2.0 revision (ECN) */ #define USB_DT_OTG 0x09 #define USB_DT_DEBUG 0x0a #define USB_DT_INTERFACE_ASSOCIATION 0x0b /* these are from the Wireless USB spec */ #define USB_DT_SECURITY 0x0c #define USB_DT_KEY 0x0d #define USB_DT_ENCRYPTION_TYPE 0x0e #define USB_DT_BOS 0x0f #define USB_DT_DEVICE_CAPABILITY 0x10 #define USB_DT_WIRELESS_ENDPOINT_COMP 0x11 #define USB_DT_WIRE_ADAPTER 0x21 /* From USB Device Firmware Upgrade Specification, Revision 1.1 */ #define USB_DT_DFU_FUNCTIONAL 0x21 /* these are from the Wireless USB spec */ #define USB_DT_RPIPE 0x22 #define USB_DT_CS_RADIO_CONTROL 0x23 /* From the T10 UAS specification */ #define USB_DT_PIPE_USAGE 0x24 /* From the USB 3.0 spec */ #define USB_DT_SS_ENDPOINT_COMP 0x30 /* From the USB 3.1 spec */ #define USB_DT_SSP_ISOC_ENDPOINT_COMP 0x31 /* Conventional codes for class-specific descriptors. The convention is * defined in the USB "Common Class" Spec (3.11). Individual class specs * are authoritative for their usage, not the "common class" writeup. */ #define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE) #define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG) #define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING) #define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE) #define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT) /* All standard descriptors have these 2 fields at the beginning */ struct usb_descriptor_header { __u8 bLength; __u8 bDescriptorType; } __attribute__ ((packed)); /*-------------------------------------------------------------------------*/ /* USB_DT_DEVICE: Device descriptor */ struct usb_device_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __le16 idVendor; __le16 idProduct; __le16 bcdDevice; __u8 iManufacturer; __u8 iProduct; __u8 iSerialNumber; __u8 bNumConfigurations; } __attribute__ ((packed)); #define USB_DT_DEVICE_SIZE 18 /* * Device and/or Interface Class codes * as found in bDeviceClass or bInterfaceClass * and defined by www.usb.org documents */ #define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */ #define USB_CLASS_AUDIO 1 #define USB_CLASS_COMM 2 #define USB_CLASS_HID 3 #define USB_CLASS_PHYSICAL 5 #define USB_CLASS_STILL_IMAGE 6 #define USB_CLASS_PRINTER 7 #define USB_CLASS_MASS_STORAGE 8 #define USB_CLASS_HUB 9 #define USB_CLASS_CDC_DATA 0x0a #define USB_CLASS_CSCID 0x0b /* chip+ smart card */ #define USB_CLASS_CONTENT_SEC 0x0d /* content security */ #define USB_CLASS_VIDEO 0x0e #define USB_CLASS_WIRELESS_CONTROLLER 0xe0 #define USB_CLASS_PERSONAL_HEALTHCARE 0x0f #define USB_CLASS_AUDIO_VIDEO 0x10 #define USB_CLASS_BILLBOARD 0x11 #define USB_CLASS_USB_TYPE_C_BRIDGE 0x12 #define USB_CLASS_MISC 0xef #define USB_CLASS_APP_SPEC 0xfe #define USB_SUBCLASS_DFU 0x01 
#define USB_CLASS_VENDOR_SPEC 0xff #define USB_SUBCLASS_VENDOR_SPEC 0xff /*-------------------------------------------------------------------------*/ /* USB_DT_CONFIG: Configuration descriptor information. * * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the * descriptor type is different. Highspeed-capable devices can look * different depending on what speed they're currently running. Only * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG * descriptors. */ struct usb_config_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumInterfaces; __u8 bConfigurationValue; __u8 iConfiguration; __u8 bmAttributes; __u8 bMaxPower; } __attribute__ ((packed)); #define USB_DT_CONFIG_SIZE 9 /* from config descriptor bmAttributes */ #define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */ #define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */ #define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */ #define USB_CONFIG_ATT_BATTERY (1 << 4) /* battery powered */ /*-------------------------------------------------------------------------*/ /* USB String descriptors can contain at most 126 characters. */ #define USB_MAX_STRING_LEN 126 /* USB_DT_STRING: String descriptor */ struct usb_string_descriptor { __u8 bLength; __u8 bDescriptorType; union { __le16 legacy_padding; __DECLARE_FLEX_ARRAY(__le16, wData); /* UTF-16LE encoded */ }; } __attribute__ ((packed)); /* note that "string" zero is special, it holds language codes that * the device supports, not Unicode characters. */ /*-------------------------------------------------------------------------*/ /* USB_DT_INTERFACE: Interface descriptor */ struct usb_interface_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bInterfaceNumber; __u8 bAlternateSetting; __u8 bNumEndpoints; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 iInterface; } __attribute__ ((packed)); #define USB_DT_INTERFACE_SIZE 9 /*-------------------------------------------------------------------------*/ /* USB_DT_ENDPOINT: Endpoint descriptor */ struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; /* NOTE: these two are _only_ in audio endpoints. */ /* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */ __u8 bRefresh; __u8 bSynchAddress; } __attribute__ ((packed)); #define USB_DT_ENDPOINT_SIZE 7 #define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */ /* * Endpoints */ #define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */ #define USB_ENDPOINT_DIR_MASK 0x80 #define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */ #define USB_ENDPOINT_XFER_CONTROL 0 #define USB_ENDPOINT_XFER_ISOC 1 #define USB_ENDPOINT_XFER_BULK 2 #define USB_ENDPOINT_XFER_INT 3 #define USB_ENDPOINT_MAX_ADJUSTABLE 0x80 #define USB_ENDPOINT_MAXP_MASK 0x07ff #define USB_EP_MAXP_MULT_SHIFT 11 #define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT) #define USB_EP_MAXP_MULT(m) \ (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT) /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. 
*/ #define USB_ENDPOINT_INTRTYPE 0x30 #define USB_ENDPOINT_INTR_PERIODIC (0 << 4) #define USB_ENDPOINT_INTR_NOTIFICATION (1 << 4) #define USB_ENDPOINT_SYNCTYPE 0x0c #define USB_ENDPOINT_SYNC_NONE (0 << 2) #define USB_ENDPOINT_SYNC_ASYNC (1 << 2) #define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2) #define USB_ENDPOINT_SYNC_SYNC (3 << 2) #define USB_ENDPOINT_USAGE_MASK 0x30 #define USB_ENDPOINT_USAGE_DATA 0x00 #define USB_ENDPOINT_USAGE_FEEDBACK 0x10 #define USB_ENDPOINT_USAGE_IMPLICIT_FB 0x20 /* Implicit feedback Data endpoint */ /*-------------------------------------------------------------------------*/ /** * usb_endpoint_num - get the endpoint's number * @epd: endpoint to be checked * * Returns @epd's number: 0 to 15. */ static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd) { return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; } /** * usb_endpoint_type - get the endpoint's transfer type * @epd: endpoint to be checked * * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according * to @epd's transfer type. */ static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd) { return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; } /** * usb_endpoint_dir_in - check if the endpoint has IN direction * @epd: endpoint to be checked * * Returns true if the endpoint is of type IN, otherwise it returns false. */ static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd) { return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN); } /** * usb_endpoint_dir_out - check if the endpoint has OUT direction * @epd: endpoint to be checked * * Returns true if the endpoint is of type OUT, otherwise it returns false. */ static inline int usb_endpoint_dir_out( const struct usb_endpoint_descriptor *epd) { return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); } /** * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type * @epd: endpoint to be checked * * Returns true if the endpoint is of type bulk, otherwise it returns false. */ static inline int usb_endpoint_xfer_bulk( const struct usb_endpoint_descriptor *epd) { return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK); } /** * usb_endpoint_xfer_control - check if the endpoint has control transfer type * @epd: endpoint to be checked * * Returns true if the endpoint is of type control, otherwise it returns false. */ static inline int usb_endpoint_xfer_control( const struct usb_endpoint_descriptor *epd) { return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_CONTROL); } /** * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type * @epd: endpoint to be checked * * Returns true if the endpoint is of type interrupt, otherwise it returns * false. */ static inline int usb_endpoint_xfer_int( const struct usb_endpoint_descriptor *epd) { return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT); } /** * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type * @epd: endpoint to be checked * * Returns true if the endpoint is of type isochronous, otherwise it returns * false. 
*/ static inline int usb_endpoint_xfer_isoc( const struct usb_endpoint_descriptor *epd) { return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC); } /** * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN * @epd: endpoint to be checked * * Returns true if the endpoint has bulk transfer type and IN direction, * otherwise it returns false. */ static inline int usb_endpoint_is_bulk_in( const struct usb_endpoint_descriptor *epd) { return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd); } /** * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT * @epd: endpoint to be checked * * Returns true if the endpoint has bulk transfer type and OUT direction, * otherwise it returns false. */ static inline int usb_endpoint_is_bulk_out( const struct usb_endpoint_descriptor *epd) { return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd); } /** * usb_endpoint_is_int_in - check if the endpoint is interrupt IN * @epd: endpoint to be checked * * Returns true if the endpoint has interrupt transfer type and IN direction, * otherwise it returns false. */ static inline int usb_endpoint_is_int_in( const struct usb_endpoint_descriptor *epd) { return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd); } /** * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT * @epd: endpoint to be checked * * Returns true if the endpoint has interrupt transfer type and OUT direction, * otherwise it returns false. */ static inline int usb_endpoint_is_int_out( const struct usb_endpoint_descriptor *epd) { return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd); } /** * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN * @epd: endpoint to be checked * * Returns true if the endpoint has isochronous transfer type and IN direction, * otherwise it returns false. */ static inline int usb_endpoint_is_isoc_in( const struct usb_endpoint_descriptor *epd) { return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd); } /** * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT * @epd: endpoint to be checked * * Returns true if the endpoint has isochronous transfer type and OUT direction, * otherwise it returns false. 
*/ static inline int usb_endpoint_is_isoc_out( const struct usb_endpoint_descriptor *epd) { return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd); } /** * usb_endpoint_maxp - get endpoint's max packet size * @epd: endpoint to be checked * * Returns @epd's max packet bits [10:0] */ static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd) { return __le16_to_cpu(epd->wMaxPacketSize) & USB_ENDPOINT_MAXP_MASK; } /** * usb_endpoint_maxp_mult - get endpoint's transactional opportunities * @epd: endpoint to be checked * * Return @epd's wMaxPacketSize[12:11] + 1 */ static inline int usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd) { int maxp = __le16_to_cpu(epd->wMaxPacketSize); return USB_EP_MAXP_MULT(maxp) + 1; } static inline int usb_endpoint_interrupt_type( const struct usb_endpoint_descriptor *epd) { return epd->bmAttributes & USB_ENDPOINT_INTRTYPE; } /*-------------------------------------------------------------------------*/ /* USB_DT_SSP_ISOC_ENDPOINT_COMP: SuperSpeedPlus Isochronous Endpoint Companion * descriptor */ struct usb_ssp_isoc_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wReseved; __le32 dwBytesPerInterval; } __attribute__ ((packed)); #define USB_DT_SSP_ISOC_EP_COMP_SIZE 8 /*-------------------------------------------------------------------------*/ /* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */ struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; } __attribute__ ((packed)); #define USB_DT_SS_EP_COMP_SIZE 6 /* Bits 4:0 of bmAttributes if this is a bulk endpoint */ static inline int usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp) { int max_streams; if (!comp) return 0; max_streams = comp->bmAttributes & 0x1f; if (!max_streams) return 0; max_streams = 1 << max_streams; return max_streams; } /* Bits 1:0 of bmAttributes if this is an isoc endpoint */ #define USB_SS_MULT(p) (1 + ((p) & 0x3)) /* Bit 7 of bmAttributes if a SSP isoc endpoint companion descriptor exists */ #define USB_SS_SSP_ISOC_COMP(p) ((p) & (1 << 7)) /*-------------------------------------------------------------------------*/ /* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */ struct usb_qualifier_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __u8 bNumConfigurations; __u8 bRESERVED; } __attribute__ ((packed)); /*-------------------------------------------------------------------------*/ /* USB_DT_OTG (from OTG 1.0a supplement) */ struct usb_otg_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bmAttributes; /* support for HNP, SRP, etc */ } __attribute__ ((packed)); /* USB_DT_OTG (from OTG 2.0 supplement) */ struct usb_otg20_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bmAttributes; /* support for HNP, SRP and ADP, etc */ __le16 bcdOTG; /* OTG and EH supplement release number * in binary-coded decimal(i.e. 
2.0 is 0200H) */ } __attribute__ ((packed)); /* from usb_otg_descriptor.bmAttributes */ #define USB_OTG_SRP (1 << 0) #define USB_OTG_HNP (1 << 1) /* swap host/device roles */ #define USB_OTG_ADP (1 << 2) /* support ADP */ /* OTG 3.0 */ #define USB_OTG_RSP (1 << 3) /* support RSP */ #define OTG_STS_SELECTOR 0xF000 /* OTG status selector */ /*-------------------------------------------------------------------------*/ /* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ struct usb_debug_descriptor { __u8 bLength; __u8 bDescriptorType; /* bulk endpoints with 8 byte maxpacket */ __u8 bDebugInEndpoint; __u8 bDebugOutEndpoint; } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */ struct usb_interface_assoc_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bFirstInterface; __u8 bInterfaceCount; __u8 bFunctionClass; __u8 bFunctionSubClass; __u8 bFunctionProtocol; __u8 iFunction; } __attribute__ ((packed)); #define USB_DT_INTERFACE_ASSOCIATION_SIZE 8 /*-------------------------------------------------------------------------*/ /* USB_DT_SECURITY: group of wireless security descriptors, including * encryption types available for setting up a CC/association. */ struct usb_security_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumEncryptionTypes; } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB_DT_KEY: used with {GET,SET}_SECURITY_DATA; only public keys * may be retrieved. */ struct usb_key_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 tTKID[3]; __u8 bReserved; __u8 bKeyData[]; } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB_DT_ENCRYPTION_TYPE: bundled in DT_SECURITY groups */ struct usb_encryption_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEncryptionType; #define USB_ENC_TYPE_UNSECURE 0 #define USB_ENC_TYPE_WIRED 1 /* non-wireless mode */ #define USB_ENC_TYPE_CCM_1 2 /* aes128/cbc session */ #define USB_ENC_TYPE_RSA_1 3 /* rsa3072/sha1 auth */ __u8 bEncryptionValue; /* use in SET_ENCRYPTION */ __u8 bAuthKeyIndex; } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB_DT_BOS: group of device-level capabilities */ struct usb_bos_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumDeviceCaps; } __attribute__((packed)); #define USB_DT_BOS_SIZE 5 /*-------------------------------------------------------------------------*/ /* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ struct usb_dev_cap_header { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; } __attribute__((packed)); #define USB_CAP_TYPE_WIRELESS_USB 1 struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; #define USB_WIRELESS_P2P_DRD (1 << 1) #define USB_WIRELESS_BEACON_MASK (3 << 2) #define USB_WIRELESS_BEACON_SELF (1 << 2) #define USB_WIRELESS_BEACON_DIRECTED (2 << 2) #define USB_WIRELESS_BEACON_NONE (3 << 2) __le16 wPHYRates; /* bit rates, Mbps */ #define USB_WIRELESS_PHY_53 (1 << 0) /* always set */ #define USB_WIRELESS_PHY_80 (1 << 1) #define USB_WIRELESS_PHY_107 (1 << 2) /* always set */ #define USB_WIRELESS_PHY_160 (1 << 3) #define USB_WIRELESS_PHY_200 (1 << 4) /* always set */ #define USB_WIRELESS_PHY_320 (1 << 5) #define USB_WIRELESS_PHY_400 (1 << 6) #define 
USB_WIRELESS_PHY_480 (1 << 7) __u8 bmTFITXPowerInfo; /* TFI power levels */ __u8 bmFFITXPowerInfo; /* FFI power levels */ __le16 bmBandGroup; __u8 bReserved; } __attribute__((packed)); #define USB_DT_USB_WIRELESS_CAP_SIZE 11 /* USB 2.0 Extension descriptor */ #define USB_CAP_TYPE_EXT 2 struct usb_ext_cap_descriptor { /* Link Power Management */ __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __le32 bmAttributes; #define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ #define USB_BESL_SUPPORT (1 << 2) /* supports BESL */ #define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/ #define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */ #define USB_SET_BESL_BASELINE(p) (((p) & 0xf) << 8) #define USB_SET_BESL_DEEP(p) (((p) & 0xf) << 12) #define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8) #define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12) } __attribute__((packed)); #define USB_DT_USB_EXT_CAP_SIZE 7 /* * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB * specific device level capabilities */ #define USB_SS_CAP_TYPE 3 struct usb_ss_cap_descriptor { /* Link Power Management */ __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; #define USB_LTM_SUPPORT (1 << 1) /* supports LTM */ __le16 wSpeedSupported; #define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */ #define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */ #define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */ #define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */ __u8 bFunctionalitySupport; __u8 bU1devExitLat; __le16 bU2DevExitLat; } __attribute__((packed)); #define USB_DT_USB_SS_CAP_SIZE 10 /* * Container ID Capability descriptor: Defines the instance unique ID used to * identify the instance across all operating modes */ #define CONTAINER_ID_TYPE 4 struct usb_ss_container_id_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 ContainerID[16]; /* 128-bit number */ } __attribute__((packed)); #define USB_DT_USB_SS_CONTN_ID_SIZE 20 /* * Platform Device Capability descriptor: Defines platform specific device * capabilities */ #define USB_PLAT_DEV_CAP_TYPE 5 struct usb_plat_dev_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 UUID[16]; __u8 CapabilityData[]; } __attribute__((packed)); #define USB_DT_USB_PLAT_DEV_CAP_SIZE(capability_data_size) (20 + capability_data_size) /* * SuperSpeed Plus USB Capability descriptor: Defines the set of * SuperSpeed Plus USB specific device level capabilities */ #define USB_SSP_CAP_TYPE 0xa struct usb_ssp_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __le32 bmAttributes; #define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */ #define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */ __le16 wFunctionalitySupport; #define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf) #define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8) #define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12) __le16 wReserved; union { __le32 legacy_padding; /* list of sublink speed attrib entries */ __DECLARE_FLEX_ARRAY(__le32, bmSublinkSpeedAttr); }; #define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */ #define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */ #define USB_SSP_SUBLINK_SPEED_LSE_BPS 0 #define USB_SSP_SUBLINK_SPEED_LSE_KBPS 1 #define USB_SSP_SUBLINK_SPEED_LSE_MBPS 2 #define USB_SSP_SUBLINK_SPEED_LSE_GBPS 3 #define USB_SSP_SUBLINK_SPEED_ST 
(0x3 << 6) /* Sublink type */ #define USB_SSP_SUBLINK_SPEED_ST_SYM_RX 0 #define USB_SSP_SUBLINK_SPEED_ST_ASYM_RX 1 #define USB_SSP_SUBLINK_SPEED_ST_SYM_TX 2 #define USB_SSP_SUBLINK_SPEED_ST_ASYM_TX 3 #define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */ #define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */ #define USB_SSP_SUBLINK_SPEED_LP_SS 0 #define USB_SSP_SUBLINK_SPEED_LP_SSP 1 #define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */ } __attribute__((packed)); /* * USB Power Delivery Capability Descriptor: * Defines capabilities for PD */ /* Defines the various PD Capabilities of this device */ #define USB_PD_POWER_DELIVERY_CAPABILITY 0x06 /* Provides information on each battery supported by the device */ #define USB_PD_BATTERY_INFO_CAPABILITY 0x07 /* The Consumer characteristics of a Port on the device */ #define USB_PD_PD_CONSUMER_PORT_CAPABILITY 0x08 /* The provider characteristics of a Port on the device */ #define USB_PD_PD_PROVIDER_PORT_CAPABILITY 0x09 struct usb_pd_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; /* set to USB_PD_POWER_DELIVERY_CAPABILITY */ __u8 bReserved; __le32 bmAttributes; #define USB_PD_CAP_BATTERY_CHARGING (1 << 1) /* supports Battery Charging specification */ #define USB_PD_CAP_USB_PD (1 << 2) /* supports USB Power Delivery specification */ #define USB_PD_CAP_PROVIDER (1 << 3) /* can provide power */ #define USB_PD_CAP_CONSUMER (1 << 4) /* can consume power */ #define USB_PD_CAP_CHARGING_POLICY (1 << 5) /* supports CHARGING_POLICY feature */ #define USB_PD_CAP_TYPE_C_CURRENT (1 << 6) /* supports power capabilities defined in the USB Type-C Specification */ #define USB_PD_CAP_PWR_AC (1 << 8) #define USB_PD_CAP_PWR_BAT (1 << 9) #define USB_PD_CAP_PWR_USE_V_BUS (1 << 14) __le16 bmProviderPorts; /* Bit zero refers to the UFP of the device */ __le16 bmConsumerPorts; __le16 bcdBCVersion; __le16 bcdPDVersion; __le16 bcdUSBTypeCVersion; } __attribute__((packed)); struct usb_pd_cap_battery_info_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; /* Index of string descriptor shall contain the user friendly name for this battery */ __u8 iBattery; /* Index of string descriptor shall contain the Serial Number String for this battery */ __u8 iSerial; __u8 iManufacturer; __u8 bBatteryId; /* uniquely identifies this battery in status Messages */ __u8 bReserved; /* * Shall contain the Battery Charge value above which this * battery is considered to be fully charged but not necessarily * “topped off.” */ __le32 dwChargedThreshold; /* in mWh */ /* * Shall contain the minimum charge level of this battery such * that above this threshold, a device can be assured of being * able to power up successfully (see Battery Charging 1.2). 
*/ __le32 dwWeakThreshold; /* in mWh */ __le32 dwBatteryDesignCapacity; /* in mWh */ __le32 dwBatteryLastFullchargeCapacity; /* in mWh */ } __attribute__((packed)); struct usb_pd_cap_consumer_port_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 bmCapabilities; /* port will oerate under: */ #define USB_PD_CAP_CONSUMER_BC (1 << 0) /* BC */ #define USB_PD_CAP_CONSUMER_PD (1 << 1) /* PD */ #define USB_PD_CAP_CONSUMER_TYPE_C (1 << 2) /* USB Type-C Current */ __le16 wMinVoltage; /* in 50mV units */ __le16 wMaxVoltage; /* in 50mV units */ __u16 wReserved; __le32 dwMaxOperatingPower; /* in 10 mW - operating at steady state */ __le32 dwMaxPeakPower; /* in 10mW units - operating at peak power */ __le32 dwMaxPeakPowerTime; /* in 100ms units - duration of peak */ #define USB_PD_CAP_CONSUMER_UNKNOWN_PEAK_POWER_TIME 0xffff } __attribute__((packed)); struct usb_pd_cap_provider_port_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved1; __u8 bmCapabilities; /* port will oerate under: */ #define USB_PD_CAP_PROVIDER_BC (1 << 0) /* BC */ #define USB_PD_CAP_PROVIDER_PD (1 << 1) /* PD */ #define USB_PD_CAP_PROVIDER_TYPE_C (1 << 2) /* USB Type-C Current */ __u8 bNumOfPDObjects; __u8 bReserved2; __le32 wPowerDataObject[]; } __attribute__((packed)); /* * Precision time measurement capability descriptor: advertised by devices and * hubs that support PTM */ #define USB_PTM_CAP_TYPE 0xb struct usb_ptm_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; } __attribute__((packed)); #define USB_DT_USB_PTM_ID_SIZE 3 /* * The size of the descriptor for the Sublink Speed Attribute Count * (SSAC) specified in bmAttributes[4:0]. SSAC is zero-based */ #define USB_DT_USB_SSP_CAP_SIZE(ssac) (12 + (ssac + 1) * 4) /*-------------------------------------------------------------------------*/ /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with * each endpoint descriptor for a wireless device */ struct usb_wireless_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bMaxSequence; __le16 wMaxStreamDelay; __le16 wOverTheAirPacketSize; __u8 bOverTheAirInterval; __u8 bmCompAttributes; #define USB_ENDPOINT_SWITCH_MASK 0x03 /* in bmCompAttributes */ #define USB_ENDPOINT_SWITCH_NO 0 #define USB_ENDPOINT_SWITCH_SWITCH 1 #define USB_ENDPOINT_SWITCH_SCALE 2 } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless * host and a device for connection set up, mutual authentication, and * exchanging short lived session keys. The handshake depends on a CC. */ struct usb_handshake { __u8 bMessageNumber; __u8 bStatus; __u8 tTKID[3]; __u8 bReserved; __u8 CDID[16]; __u8 nonce[16]; __u8 MIC[8]; } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC). * A CC may also be set up using non-wireless secure channels (including * wired USB!), and some devices may support CCs with multiple hosts. 
*/ struct usb_connection_context { __u8 CHID[16]; /* persistent host id */ __u8 CDID[16]; /* device id (unique w/in host context) */ __u8 CK[16]; /* connection key */ } __attribute__((packed)); /*-------------------------------------------------------------------------*/ /* USB 2.0 defines three speeds, here's how Linux identifies them */ enum usb_device_speed { USB_SPEED_UNKNOWN = 0, /* enumerating */ USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */ USB_SPEED_HIGH, /* usb 2.0 */ USB_SPEED_WIRELESS, /* wireless (usb 2.5) */ USB_SPEED_SUPER, /* usb 3.0 */ USB_SPEED_SUPER_PLUS, /* usb 3.1 */ }; enum usb_device_state { /* NOTATTACHED isn't in the USB spec, and this state acts * the same as ATTACHED ... but it's clearer this way. */ USB_STATE_NOTATTACHED = 0, /* chapter 9 and authentication (wireless) device states */ USB_STATE_ATTACHED, USB_STATE_POWERED, /* wired */ USB_STATE_RECONNECTING, /* auth */ USB_STATE_UNAUTHENTICATED, /* auth */ USB_STATE_DEFAULT, /* limited function */ USB_STATE_ADDRESS, USB_STATE_CONFIGURED, /* most functions */ USB_STATE_SUSPENDED /* NOTE: there are actually four different SUSPENDED * states, returning to POWERED, DEFAULT, ADDRESS, or * CONFIGURED respectively when SOF tokens flow again. * At this level there's no difference between L1 and L2 * suspend states. (L2 being original USB 1.1 suspend.) */ }; enum usb3_link_state { USB3_LPM_U0 = 0, USB3_LPM_U1, USB3_LPM_U2, USB3_LPM_U3 }; /* * A U1 timeout of 0x0 means the parent hub will reject any transitions to U1. * 0xff means the parent hub will accept transitions to U1, but will not * initiate a transition. * * A U1 timeout of 0x1 to 0x7F also causes the hub to initiate a transition to * U1 after that many microseconds. Timeouts of 0x80 to 0xFE are reserved * values. * * A U2 timeout of 0x0 means the parent hub will reject any transitions to U2. * 0xff means the parent hub will accept transitions to U2, but will not * initiate a transition. * * A U2 timeout of 0x1 to 0xFE also causes the hub to initiate a transition to * U2 after N*256 microseconds. Therefore a U2 timeout value of 0x1 means a U2 * idle timer of 256 microseconds, 0x2 means 512 microseconds, 0xFE means * 65.024ms. */ #define USB3_LPM_DISABLED 0x0 #define USB3_LPM_U1_MAX_TIMEOUT 0x7F #define USB3_LPM_U2_MAX_TIMEOUT 0xFE #define USB3_LPM_DEVICE_INITIATED 0xFF struct usb_set_sel_req { __u8 u1_sel; __u8 u1_pel; __le16 u2_sel; __le16 u2_pel; } __attribute__ ((packed)); /* * The Set System Exit Latency control transfer provides one byte each for * U1 SEL and U1 PEL, so the max exit latency is 0xFF. U2 SEL and U2 PEL each * are two bytes long. */ #define USB3_LPM_MAX_U1_SEL_PEL 0xFF #define USB3_LPM_MAX_U2_SEL_PEL 0xFFFF /*-------------------------------------------------------------------------*/ /* * As per USB compliance update, a device that is actively drawing * more than 100mA from USB must report itself as bus-powered in * the GetStatus(DEVICE) call. * https://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 */ #define USB_SELF_POWER_VBUS_MAX_DRAW 100 #endif /* _UAPI__LINUX_USB_CH9_H */
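A small sketch of how the endpoint masks and inline helpers defined in this header can be used from user space, assuming the program is built against installed kernel UAPI headers that provide <linux/usb/ch9.h>. The descriptor values are made-up illustrative data, not read from a real device.

#include <stdio.h>
#include <linux/usb/ch9.h>

int main(void)
{
	/* example: endpoint 1 IN, bulk, 512-byte wMaxPacketSize */
	struct usb_endpoint_descriptor ep = {
		.bLength          = USB_DT_ENDPOINT_SIZE,
		.bDescriptorType  = USB_DT_ENDPOINT,
		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes     = USB_ENDPOINT_XFER_BULK,
		.wMaxPacketSize   = __cpu_to_le16(512),
		.bInterval        = 0,
	};

	/* the helpers mask bEndpointAddress, bmAttributes and wMaxPacketSize
	 * exactly as defined earlier in this header
	 */
	printf("ep %d %s, xfer type %d, maxp %d, mult %d\n",
	       usb_endpoint_num(&ep),
	       usb_endpoint_dir_in(&ep) ? "IN" : "OUT",
	       usb_endpoint_type(&ep),
	       usb_endpoint_maxp(&ep),
	       usb_endpoint_maxp_mult(&ep));
	return 0;
}

For this descriptor the output would report endpoint 1, IN, transfer type 2 (bulk), maxp 512 and mult 1, since bits 12:11 of wMaxPacketSize are zero.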
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Implementation of the access vector table type. * * Author : Stephen Smalley, <stephen.smalley.work@gmail.com> */ /* Updated: Frank Mayer <mayerf@tresys.com> and * Karl MacMillan <kmacmillan@tresys.com> * Added conditional policy language extensions * Copyright (C) 2003 Tresys Technology, LLC * * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp> * Tuned number of hash slots for avtab to reduce memory usage */ #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include "avtab.h" #include "policydb.h" static struct kmem_cache *avtab_node_cachep __ro_after_init; static struct kmem_cache *avtab_xperms_cachep __ro_after_init; /* Based on MurmurHash3, written by Austin Appleby and placed in the * public domain.
*/ static inline u32 avtab_hash(const struct avtab_key *keyp, u32 mask) { static const u32 c1 = 0xcc9e2d51; static const u32 c2 = 0x1b873593; static const u32 r1 = 15; static const u32 r2 = 13; static const u32 m = 5; static const u32 n = 0xe6546b64; u32 hash = 0; #define mix(input) \ do { \ u32 v = input; \ v *= c1; \ v = (v << r1) | (v >> (32 - r1)); \ v *= c2; \ hash ^= v; \ hash = (hash << r2) | (hash >> (32 - r2)); \ hash = hash * m + n; \ } while (0) mix(keyp->target_class); mix(keyp->target_type); mix(keyp->source_type); #undef mix hash ^= hash >> 16; hash *= 0x85ebca6b; hash ^= hash >> 13; hash *= 0xc2b2ae35; hash ^= hash >> 16; return hash & mask; } static struct avtab_node *avtab_insert_node(struct avtab *h, struct avtab_node **dst, const struct avtab_key *key, const struct avtab_datum *datum) { struct avtab_node *newnode; struct avtab_extended_perms *xperms; newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL); if (newnode == NULL) return NULL; newnode->key = *key; if (key->specified & AVTAB_XPERMS) { xperms = kmem_cache_zalloc(avtab_xperms_cachep, GFP_KERNEL); if (xperms == NULL) { kmem_cache_free(avtab_node_cachep, newnode); return NULL; } *xperms = *(datum->u.xperms); newnode->datum.u.xperms = xperms; } else { newnode->datum.u.data = datum->u.data; } newnode->next = *dst; *dst = newnode; h->nel++; return newnode; } static int avtab_node_cmp(const struct avtab_key *key1, const struct avtab_key *key2) { u16 specified = key1->specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD); if (key1->source_type == key2->source_type && key1->target_type == key2->target_type && key1->target_class == key2->target_class && (specified & key2->specified)) return 0; if (key1->source_type < key2->source_type) return -1; if (key1->source_type == key2->source_type && key1->target_type < key2->target_type) return -1; if (key1->source_type == key2->source_type && key1->target_type == key2->target_type && key1->target_class < key2->target_class) return -1; return 1; } static int avtab_insert(struct avtab *h, const struct avtab_key *key, const struct avtab_datum *datum) { u32 hvalue; struct avtab_node *prev, *cur, *newnode; int cmp; if (!h || !h->nslot || h->nel == U32_MAX) return -EINVAL; hvalue = avtab_hash(key, h->mask); for (prev = NULL, cur = h->htable[hvalue]; cur; prev = cur, cur = cur->next) { cmp = avtab_node_cmp(key, &cur->key); /* extended perms may not be unique */ if (cmp == 0 && !(key->specified & AVTAB_XPERMS)) return -EEXIST; if (cmp <= 0) break; } newnode = avtab_insert_node(h, prev ? &prev->next : &h->htable[hvalue], key, datum); if (!newnode) return -ENOMEM; return 0; } /* Unlike avtab_insert(), this function allow multiple insertions of the same * key/specified mask into the table, as needed by the conditional avtab. * It also returns a pointer to the node inserted. */ struct avtab_node *avtab_insert_nonunique(struct avtab *h, const struct avtab_key *key, const struct avtab_datum *datum) { u32 hvalue; struct avtab_node *prev, *cur; int cmp; if (!h || !h->nslot || h->nel == U32_MAX) return NULL; hvalue = avtab_hash(key, h->mask); for (prev = NULL, cur = h->htable[hvalue]; cur; prev = cur, cur = cur->next) { cmp = avtab_node_cmp(key, &cur->key); if (cmp <= 0) break; } return avtab_insert_node(h, prev ? 
&prev->next : &h->htable[hvalue], key, datum); } /* This search function returns a node pointer, and can be used in * conjunction with avtab_search_next_node() */ struct avtab_node *avtab_search_node(struct avtab *h, const struct avtab_key *key) { u32 hvalue; struct avtab_node *cur; int cmp; if (!h || !h->nslot) return NULL; hvalue = avtab_hash(key, h->mask); for (cur = h->htable[hvalue]; cur; cur = cur->next) { cmp = avtab_node_cmp(key, &cur->key); if (cmp == 0) return cur; if (cmp < 0) break; } return NULL; } struct avtab_node *avtab_search_node_next(struct avtab_node *node, u16 specified) { struct avtab_key tmp_key; struct avtab_node *cur; int cmp; if (!node) return NULL; tmp_key = node->key; tmp_key.specified = specified; for (cur = node->next; cur; cur = cur->next) { cmp = avtab_node_cmp(&tmp_key, &cur->key); if (cmp == 0) return cur; if (cmp < 0) break; } return NULL; } void avtab_destroy(struct avtab *h) { u32 i; struct avtab_node *cur, *temp; if (!h) return; for (i = 0; i < h->nslot; i++) { cur = h->htable[i]; while (cur) { temp = cur; cur = cur->next; if (temp->key.specified & AVTAB_XPERMS) kmem_cache_free(avtab_xperms_cachep, temp->datum.u.xperms); kmem_cache_free(avtab_node_cachep, temp); } } kvfree(h->htable); h->htable = NULL; h->nel = 0; h->nslot = 0; h->mask = 0; } void avtab_init(struct avtab *h) { h->htable = NULL; h->nel = 0; h->nslot = 0; h->mask = 0; } static int avtab_alloc_common(struct avtab *h, u32 nslot) { if (!nslot) return 0; h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL); if (!h->htable) return -ENOMEM; h->nslot = nslot; h->mask = nslot - 1; return 0; } int avtab_alloc(struct avtab *h, u32 nrules) { int rc; u32 nslot = 0; if (nrules != 0) { nslot = nrules > 3 ? rounddown_pow_of_two(nrules / 2) : 2; if (nslot > MAX_AVTAB_HASH_BUCKETS) nslot = MAX_AVTAB_HASH_BUCKETS; rc = avtab_alloc_common(h, nslot); if (rc) return rc; } pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules); return 0; } int avtab_alloc_dup(struct avtab *new, const struct avtab *orig) { return avtab_alloc_common(new, orig->nslot); } #ifdef CONFIG_SECURITY_SELINUX_DEBUG void avtab_hash_eval(struct avtab *h, const char *tag) { u32 i, chain_len, slots_used, max_chain_len; unsigned long long chain2_len_sum; struct avtab_node *cur; slots_used = 0; max_chain_len = 0; chain2_len_sum = 0; for (i = 0; i < h->nslot; i++) { cur = h->htable[i]; if (cur) { slots_used++; chain_len = 0; while (cur) { chain_len++; cur = cur->next; } if (chain_len > max_chain_len) max_chain_len = chain_len; chain2_len_sum += (unsigned long long)chain_len * chain_len; } } pr_debug("SELinux: %s: %d entries and %d/%d buckets used, " "longest chain length %d, sum of chain length^2 %llu\n", tag, h->nel, slots_used, h->nslot, max_chain_len, chain2_len_sum); } #endif /* CONFIG_SECURITY_SELINUX_DEBUG */ /* clang-format off */ static const uint16_t spec_order[] = { AVTAB_ALLOWED, AVTAB_AUDITDENY, AVTAB_AUDITALLOW, AVTAB_TRANSITION, AVTAB_CHANGE, AVTAB_MEMBER, AVTAB_XPERMS_ALLOWED, AVTAB_XPERMS_AUDITALLOW, AVTAB_XPERMS_DONTAUDIT }; /* clang-format on */ int avtab_read_item(struct avtab *a, struct policy_file *fp, struct policydb *pol, int (*insertf)(struct avtab *a, const struct avtab_key *k, const struct avtab_datum *d, void *p), void *p, bool conditional) { __le16 buf16[4]; u16 enabled; u32 items, items2, val, i; struct avtab_key key; struct avtab_datum datum; struct avtab_extended_perms xperms; __le32 buf32[ARRAY_SIZE(xperms.perms.p)]; int rc; unsigned int set, vers = pol->policyvers; memset(&key, 0, sizeof(struct 
avtab_key)); memset(&datum, 0, sizeof(struct avtab_datum)); if (vers < POLICYDB_VERSION_AVTAB) { rc = next_entry(buf32, fp, sizeof(u32)); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } items2 = le32_to_cpu(buf32[0]); if (items2 > ARRAY_SIZE(buf32)) { pr_err("SELinux: avtab: entry overflow\n"); return -EINVAL; } rc = next_entry(buf32, fp, sizeof(u32) * items2); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } items = 0; val = le32_to_cpu(buf32[items++]); key.source_type = (u16)val; if (key.source_type != val) { pr_err("SELinux: avtab: truncated source type\n"); return -EINVAL; } val = le32_to_cpu(buf32[items++]); key.target_type = (u16)val; if (key.target_type != val) { pr_err("SELinux: avtab: truncated target type\n"); return -EINVAL; } val = le32_to_cpu(buf32[items++]); key.target_class = (u16)val; if (key.target_class != val) { pr_err("SELinux: avtab: truncated target class\n"); return -EINVAL; } val = le32_to_cpu(buf32[items++]); enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0; if (!(val & (AVTAB_AV | AVTAB_TYPE))) { pr_err("SELinux: avtab: null entry\n"); return -EINVAL; } if ((val & AVTAB_AV) && (val & AVTAB_TYPE)) { pr_err("SELinux: avtab: entry has both access vectors and types\n"); return -EINVAL; } if (val & AVTAB_XPERMS) { pr_err("SELinux: avtab: entry has extended permissions\n"); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(spec_order); i++) { if (val & spec_order[i]) { key.specified = spec_order[i] | enabled; datum.u.data = le32_to_cpu(buf32[items++]); rc = insertf(a, &key, &datum, p); if (rc) return rc; } } if (items != items2) { pr_err("SELinux: avtab: entry only had %d items, expected %d\n", items2, items); return -EINVAL; } return 0; } rc = next_entry(buf16, fp, sizeof(u16) * 4); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } items = 0; key.source_type = le16_to_cpu(buf16[items++]); key.target_type = le16_to_cpu(buf16[items++]); key.target_class = le16_to_cpu(buf16[items++]); key.specified = le16_to_cpu(buf16[items++]); if (!policydb_type_isvalid(pol, key.source_type) || !policydb_type_isvalid(pol, key.target_type) || !policydb_class_isvalid(pol, key.target_class)) { pr_err("SELinux: avtab: invalid type or class\n"); return -EINVAL; } set = hweight16(key.specified & (AVTAB_XPERMS | AVTAB_TYPE | AVTAB_AV)); if (!set || set > 1) { pr_err("SELinux: avtab: more than one specifier\n"); return -EINVAL; } if ((vers < POLICYDB_VERSION_XPERMS_IOCTL) && (key.specified & AVTAB_XPERMS)) { pr_err("SELinux: avtab: policy version %u does not " "support extended permissions rules and one " "was specified\n", vers); return -EINVAL; } else if ((vers < POLICYDB_VERSION_COND_XPERMS) && (key.specified & AVTAB_XPERMS) && conditional) { pr_err("SELinux: avtab: policy version %u does not " "support extended permissions rules in conditional " "policies and one was specified\n", vers); return -EINVAL; } else if (key.specified & AVTAB_XPERMS) { memset(&xperms, 0, sizeof(struct avtab_extended_perms)); rc = next_entry(&xperms.specified, fp, sizeof(u8)); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } rc = next_entry(&xperms.driver, fp, sizeof(u8)); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } rc = next_entry(buf32, fp, sizeof(u32) * ARRAY_SIZE(xperms.perms.p)); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++) xperms.perms.p[i] = le32_to_cpu(buf32[i]); datum.u.xperms = &xperms; } else { rc = next_entry(buf32, fp, 
sizeof(u32)); if (rc) { pr_err("SELinux: avtab: truncated entry\n"); return rc; } datum.u.data = le32_to_cpu(*buf32); } if ((key.specified & AVTAB_TYPE) && !policydb_type_isvalid(pol, datum.u.data)) { pr_err("SELinux: avtab: invalid type\n"); return -EINVAL; } return insertf(a, &key, &datum, p); } static int avtab_insertf(struct avtab *a, const struct avtab_key *k, const struct avtab_datum *d, void *p) { return avtab_insert(a, k, d); } int avtab_read(struct avtab *a, struct policy_file *fp, struct policydb *pol) { int rc; __le32 buf[1]; u32 nel, i; rc = next_entry(buf, fp, sizeof(u32)); if (rc < 0) { pr_err("SELinux: avtab: truncated table\n"); goto bad; } nel = le32_to_cpu(buf[0]); if (!nel) { pr_err("SELinux: avtab: table is empty\n"); rc = -EINVAL; goto bad; } rc = avtab_alloc(a, nel); if (rc) goto bad; for (i = 0; i < nel; i++) { rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL, false); if (rc) { if (rc == -ENOMEM) pr_err("SELinux: avtab: out of memory\n"); else if (rc == -EEXIST) pr_err("SELinux: avtab: duplicate entry\n"); goto bad; } } rc = 0; out: return rc; bad: avtab_destroy(a); goto out; } int avtab_write_item(struct policydb *p, const struct avtab_node *cur, struct policy_file *fp) { __le16 buf16[4]; __le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)]; int rc; unsigned int i; buf16[0] = cpu_to_le16(cur->key.source_type); buf16[1] = cpu_to_le16(cur->key.target_type); buf16[2] = cpu_to_le16(cur->key.target_class); buf16[3] = cpu_to_le16(cur->key.specified); rc = put_entry(buf16, sizeof(u16), 4, fp); if (rc) return rc; if (cur->key.specified & AVTAB_XPERMS) { rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp); if (rc) return rc; rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp); if (rc) return rc; for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++) buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]); rc = put_entry(buf32, sizeof(u32), ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp); } else { buf32[0] = cpu_to_le32(cur->datum.u.data); rc = put_entry(buf32, sizeof(u32), 1, fp); } if (rc) return rc; return 0; } int avtab_write(struct policydb *p, struct avtab *a, struct policy_file *fp) { u32 i; int rc = 0; struct avtab_node *cur; __le32 buf[1]; buf[0] = cpu_to_le32(a->nel); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; for (i = 0; i < a->nslot; i++) { for (cur = a->htable[i]; cur; cur = cur->next) { rc = avtab_write_item(p, cur, fp); if (rc) return rc; } } return rc; } void __init avtab_cache_init(void) { avtab_node_cachep = KMEM_CACHE(avtab_node, SLAB_PANIC); avtab_xperms_cachep = KMEM_CACHE(avtab_extended_perms, SLAB_PANIC); }
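To make the bucket-sizing arithmetic in avtab_alloc() above concrete, here is a small stand-alone sketch (userspace, illustrative only) that mirrors the same calculation; MAX_BUCKETS and rounddown_pow2() are local stand-ins for MAX_AVTAB_HASH_BUCKETS and the kernel's rounddown_pow_of_two().

#include <stdio.h>

#define MAX_BUCKETS 8192	/* stand-in for MAX_AVTAB_HASH_BUCKETS */

/* Largest power of two that is <= v, for v >= 1. */
static unsigned int rounddown_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p <= v / 2)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int samples[] = { 1, 3, 100, 10000, 1000000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int nrules = samples[i];
		/* Same policy as avtab_alloc(): half the rule count rounded
		 * down to a power of two, floor of 2, capped at MAX_BUCKETS. */
		unsigned int nslot = nrules > 3 ? rounddown_pow2(nrules / 2) : 2;

		if (nslot > MAX_BUCKETS)
			nslot = MAX_BUCKETS;
		printf("%u rules -> %u hash slots\n", nrules, nslot);
	}
	return 0;
}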
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2005 Mike Isely <isely@pobox.com> */ #include "pvrusb2-std.h" #include "pvrusb2-debug.h" #include <asm/string.h> #include <linux/slab.h> struct std_name { const char *name; v4l2_std_id id; }; #define CSTD_PAL \ (V4L2_STD_PAL_B| \ V4L2_STD_PAL_B1| \ V4L2_STD_PAL_G| \ V4L2_STD_PAL_H| \ V4L2_STD_PAL_I| \ V4L2_STD_PAL_D| \ V4L2_STD_PAL_D1| \ V4L2_STD_PAL_K| \ V4L2_STD_PAL_M| \ V4L2_STD_PAL_N| \ V4L2_STD_PAL_Nc| \ V4L2_STD_PAL_60) #define CSTD_NTSC \ (V4L2_STD_NTSC_M| \ V4L2_STD_NTSC_M_JP| \ V4L2_STD_NTSC_M_KR| \ V4L2_STD_NTSC_443) #define CSTD_ATSC \ (V4L2_STD_ATSC_8_VSB| \ V4L2_STD_ATSC_16_VSB) #define CSTD_SECAM \ (V4L2_STD_SECAM_B| \ V4L2_STD_SECAM_D| \ V4L2_STD_SECAM_G| \ V4L2_STD_SECAM_H| \ V4L2_STD_SECAM_K| \ V4L2_STD_SECAM_K1| \ V4L2_STD_SECAM_L| \ V4L2_STD_SECAM_LC) #define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B) #define TSTD_B1 (V4L2_STD_PAL_B1) #define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D) #define TSTD_D1 (V4L2_STD_PAL_D1) #define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G) #define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H) #define TSTD_I (V4L2_STD_PAL_I) #define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K) #define TSTD_K1 (V4L2_STD_SECAM_K1) #define TSTD_L (V4L2_STD_SECAM_L) #define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M) #define TSTD_N (V4L2_STD_PAL_N) #define TSTD_Nc (V4L2_STD_PAL_Nc) #define TSTD_60 (V4L2_STD_PAL_60) #define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_ATSC|CSTD_SECAM) /* Mapping of standard bits to color system */ static const struct std_name std_groups[] = { {"PAL",CSTD_PAL}, {"NTSC",CSTD_NTSC}, {"SECAM",CSTD_SECAM}, {"ATSC",CSTD_ATSC}, }; /* Mapping of standard bits to modulation system */ static const struct std_name std_items[] = { {"B",TSTD_B}, {"B1",TSTD_B1}, {"D",TSTD_D}, {"D1",TSTD_D1}, {"G",TSTD_G}, {"H",TSTD_H}, {"I",TSTD_I}, {"K",TSTD_K}, {"K1",TSTD_K1}, {"L",TSTD_L}, {"LC",V4L2_STD_SECAM_LC}, {"M",TSTD_M}, {"Mj",V4L2_STD_NTSC_M_JP}, {"443",V4L2_STD_NTSC_443}, {"Mk",V4L2_STD_NTSC_M_KR}, {"N",TSTD_N}, {"Nc",TSTD_Nc}, {"60",TSTD_60}, {"8VSB",V4L2_STD_ATSC_8_VSB},
{"16VSB",V4L2_STD_ATSC_16_VSB}, }; // Search an array of std_name structures and return a pointer to the // element with the matching name. static const struct std_name *find_std_name(const struct std_name *arrPtr, unsigned int arrSize, const char *bufPtr, unsigned int bufSize) { unsigned int idx; const struct std_name *p; for (idx = 0; idx < arrSize; idx++) { p = arrPtr + idx; if (strlen(p->name) != bufSize) continue; if (!memcmp(bufPtr,p->name,bufSize)) return p; } return NULL; } int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr, unsigned int bufSize) { v4l2_std_id id = 0; v4l2_std_id cmsk = 0; v4l2_std_id t; int mMode = 0; unsigned int cnt; char ch; const struct std_name *sp; while (bufSize) { if (!mMode) { cnt = 0; while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++; if (cnt >= bufSize) return 0; // No more characters sp = find_std_name(std_groups, ARRAY_SIZE(std_groups), bufPtr,cnt); if (!sp) return 0; // Illegal color system name cnt++; bufPtr += cnt; bufSize -= cnt; mMode = !0; cmsk = sp->id; continue; } cnt = 0; while (cnt < bufSize) { ch = bufPtr[cnt]; if (ch == ';') { mMode = 0; break; } if (ch == '/') break; cnt++; } sp = find_std_name(std_items, ARRAY_SIZE(std_items), bufPtr,cnt); if (!sp) return 0; // Illegal modulation system ID t = sp->id & cmsk; if (!t) return 0; // Specific color + modulation system illegal id |= t; if (cnt < bufSize) cnt++; bufPtr += cnt; bufSize -= cnt; } if (idPtr) *idPtr = id; return !0; } unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize, v4l2_std_id id) { unsigned int idx1,idx2; const struct std_name *ip,*gp; int gfl,cfl; unsigned int c1,c2; cfl = 0; c1 = 0; for (idx1 = 0; idx1 < ARRAY_SIZE(std_groups); idx1++) { gp = std_groups + idx1; gfl = 0; for (idx2 = 0; idx2 < ARRAY_SIZE(std_items); idx2++) { ip = std_items + idx2; if (!(gp->id & ip->id & id)) continue; if (!gfl) { if (cfl) { c2 = scnprintf(bufPtr,bufSize,";"); c1 += c2; bufSize -= c2; bufPtr += c2; } cfl = !0; c2 = scnprintf(bufPtr,bufSize, "%s-",gp->name); gfl = !0; } else { c2 = scnprintf(bufPtr,bufSize,"/"); } c1 += c2; bufSize -= c2; bufPtr += c2; c2 = scnprintf(bufPtr,bufSize, ip->name); c1 += c2; bufSize -= c2; bufPtr += c2; } } return c1; } // Template data for possible enumerated video standards. Here we group // standards which share common frame rates and resolution. 
static struct v4l2_standard generic_standards[] = { { .id = (TSTD_B|TSTD_B1| TSTD_D|TSTD_D1| TSTD_G| TSTD_H| TSTD_I| TSTD_K|TSTD_K1| TSTD_L| V4L2_STD_SECAM_LC | TSTD_N|TSTD_Nc), .frameperiod = { .numerator = 1, .denominator= 25 }, .framelines = 625, .reserved = {0,0,0,0} }, { .id = (TSTD_M| V4L2_STD_NTSC_M_JP| V4L2_STD_NTSC_M_KR), .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} }, { // This is a total wild guess .id = (TSTD_60), .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} }, { // This is total wild guess .id = V4L2_STD_NTSC_443, .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} } }; static struct v4l2_standard *match_std(v4l2_std_id id) { unsigned int idx; for (idx = 0; idx < ARRAY_SIZE(generic_standards); idx++) { if (generic_standards[idx].id & id) { return generic_standards + idx; } } return NULL; } static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id) { struct v4l2_standard *template; int idx; unsigned int bcnt; template = match_std(id); if (!template) return 0; idx = std->index; memcpy(std,template,sizeof(*template)); std->index = idx; std->id = id; bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id); std->name[bcnt] = 0; pvr2_trace(PVR2_TRACE_STD,"Set up standard idx=%u name=%s", std->index,std->name); return !0; } /* These are special cases of combined standards that we should enumerate separately if the component pieces are present. */ static v4l2_std_id std_mixes[] = { V4L2_STD_PAL_B | V4L2_STD_PAL_G, V4L2_STD_PAL_D | V4L2_STD_PAL_K, V4L2_STD_SECAM_B | V4L2_STD_SECAM_G, V4L2_STD_SECAM_D | V4L2_STD_SECAM_K, }; struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr, v4l2_std_id id) { unsigned int std_cnt = 0; unsigned int idx,bcnt,idx2; v4l2_std_id idmsk,cmsk,fmsk; struct v4l2_standard *stddefs; if (pvrusb2_debug & PVR2_TRACE_STD) { char buf[100]; bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id); pvr2_trace( PVR2_TRACE_STD,"Mapping standards mask=0x%x (%.*s)", (int)id,bcnt,buf); } *countptr = 0; std_cnt = 0; fmsk = 0; for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) { if (!(idmsk & cmsk)) continue; cmsk &= ~idmsk; if (match_std(idmsk)) { std_cnt++; continue; } fmsk |= idmsk; } for (idx2 = 0; idx2 < ARRAY_SIZE(std_mixes); idx2++) { if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++; } /* Don't complain about ATSC standard values */ fmsk &= ~CSTD_ATSC; if (fmsk) { char buf[100]; bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk); pvr2_trace( PVR2_TRACE_ERROR_LEGS, "***WARNING*** Failed to classify the following standard(s): %.*s", bcnt,buf); } pvr2_trace(PVR2_TRACE_STD,"Setting up %u unique standard(s)", std_cnt); if (!std_cnt) return NULL; // paranoia stddefs = kcalloc(std_cnt, sizeof(struct v4l2_standard), GFP_KERNEL); if (!stddefs) return NULL; for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx; idx = 0; /* Enumerate potential special cases */ for (idx2 = 0; (idx2 < ARRAY_SIZE(std_mixes)) && (idx < std_cnt); idx2++) { if (!(id & std_mixes[idx2])) continue; if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++; } /* Now enumerate individual pieces */ for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) { if (!(idmsk & cmsk)) continue; cmsk &= ~idmsk; if (!pvr2_std_fill(stddefs+idx,idmsk)) continue; idx++; } *countptr = std_cnt; return stddefs; } v4l2_std_id pvr2_std_get_usable(void) { return CSTD_ALL; }
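As a usage note for the parser above: pvr2_std_str_to_id() accepts strings of the form "<colorsystem>-<mod>[/<mod>...]", with multiple groups separated by ';'. A minimal, hypothetical caller (assuming it is built inside the pvrusb2 driver where these symbols are visible) might look like this:

/* Illustrative only: turn a textual standard spec into a v4l2_std_id mask. */
static v4l2_std_id example_parse_standard(void)
{
	static const char spec[] = "PAL-B/G;NTSC-M";
	v4l2_std_id id = 0;

	/* A non-zero return means success; id then holds
	 * V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_NTSC_M. */
	if (!pvr2_std_str_to_id(&id, spec, sizeof(spec) - 1))
		return 0;
	return id;
}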
// SPDX-License-Identifier: GPL-2.0-only /* * mm/percpu.c - percpu memory allocator * * Copyright (C) 2009 SUSE Linux Products GmbH * Copyright (C) 2009 Tejun Heo <tj@kernel.org> * * Copyright (C) 2017 Facebook Inc. * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org> * * The percpu allocator handles both static and dynamic areas. Percpu * areas are allocated in chunks which are divided into units. There is * a 1-to-1 mapping for units to possible cpus. These units are grouped * based on NUMA properties of the machine. * * c0 c1 c2 * ------------------- ------------------- ------------ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u * ------------------- ...... ------------------- .... ------------ * * Allocation is done by offsets into a unit's address space. Ie., an * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0, * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear * and even sparse. Access is handled by configuring percpu base * registers according to the cpu to unit mappings and offsetting the * base address using pcpu_unit_size. * * There is special consideration for the first chunk which must handle * the static percpu variables in the kernel image as allocation services * are not online yet.
In short, the first chunk is structured like so: * * <Static | [Reserved] | Dynamic> * * The static data is copied from the original section managed by the * linker. The reserved section, if non-zero, primarily manages static * percpu variables from kernel modules. Finally, the dynamic section * takes care of normal allocations. * * The allocator organizes chunks into lists according to free size and * memcg-awareness. To make a percpu allocation memcg-aware the __GFP_ACCOUNT * flag should be passed. All memcg-aware allocations are sharing one set * of chunks and all unaccounted allocations and allocations performed * by processes belonging to the root memory cgroup are using the second set. * * The allocator tries to allocate from the fullest chunk first. Each chunk * is managed by a bitmap with metadata blocks. The allocation map is updated * on every allocation and free to reflect the current state while the boundary * map is only updated on allocation. Each metadata block contains * information to help mitigate the need to iterate over large portions * of the bitmap. The reverse mapping from page to chunk is stored in * the page's index. Lastly, units are lazily backed and grow in unison. * * There is a unique conversion that goes on here between bytes and bits. * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk * tracks the number of pages it is responsible for in nr_pages. Helper * functions are used to convert from between the bytes, bits, and blocks. * All hints are managed in bits unless explicitly stated. * * To use this allocator, arch code should do the following: * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be * different from the default * * - use pcpu_setup_first_chunk() during percpu area initialization to * setup the first chunk containing the kernel static percpu area */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitmap.h> #include <linux/cpumask.h> #include <linux/memblock.h> #include <linux/err.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/kmemleak.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/memcontrol.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/io.h> #define CREATE_TRACE_POINTS #include <trace/events/percpu.h> #include "percpu-internal.h" /* * The slots are sorted by the size of the biggest continuous free area. * 1-31 bytes share the same slot. 
*/ #define PCPU_SLOT_BASE_SHIFT 5 /* chunks in slots below this are subject to being sidelined on failed alloc */ #define PCPU_SLOT_FAIL_THRESHOLD 3 #define PCPU_EMPTY_POP_PAGES_LOW 2 #define PCPU_EMPTY_POP_PAGES_HIGH 4 #ifdef CONFIG_SMP /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ #ifndef __addr_to_pcpu_ptr #define __addr_to_pcpu_ptr(addr) \ (void __percpu *)((unsigned long)(addr) - \ (unsigned long)pcpu_base_addr + \ (unsigned long)__per_cpu_start) #endif #ifndef __pcpu_ptr_to_addr #define __pcpu_ptr_to_addr(ptr) \ (void __force *)((unsigned long)(ptr) + \ (unsigned long)pcpu_base_addr - \ (unsigned long)__per_cpu_start) #endif #else /* CONFIG_SMP */ /* on UP, it's always identity mapped */ #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) #endif /* CONFIG_SMP */ static int pcpu_unit_pages __ro_after_init; static int pcpu_unit_size __ro_after_init; static int pcpu_nr_units __ro_after_init; static int pcpu_atom_size __ro_after_init; int pcpu_nr_slots __ro_after_init; static int pcpu_free_slot __ro_after_init; int pcpu_sidelined_slot __ro_after_init; int pcpu_to_depopulate_slot __ro_after_init; static size_t pcpu_chunk_struct_size __ro_after_init; /* cpus with the lowest and highest unit addresses */ static unsigned int pcpu_low_unit_cpu __ro_after_init; static unsigned int pcpu_high_unit_cpu __ro_after_init; /* the address of the first chunk which starts with the kernel static area */ void *pcpu_base_addr __ro_after_init; static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */ const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */ /* group information, used for vm allocation */ static int pcpu_nr_groups __ro_after_init; static const unsigned long *pcpu_group_offsets __ro_after_init; static const size_t *pcpu_group_sizes __ro_after_init; /* * The first chunk which always exists. Note that unlike other * chunks, this one can be allocated and mapped in several different * ways and thus often doesn't live in the vmalloc area. */ struct pcpu_chunk *pcpu_first_chunk __ro_after_init; /* * Optional reserved chunk. This chunk reserves part of the first * chunk and serves it for reserved allocations. When the reserved * region doesn't exist, the following variable is NULL. */ struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init; DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */ /* * The number of empty populated pages, protected by pcpu_lock. * The reserved chunk doesn't contribute to the count. */ int pcpu_nr_empty_pop_pages; /* * The number of populated pages in use by the allocator, protected by * pcpu_lock. This number is kept per a unit per chunk (i.e. when a page gets * allocated/deallocated, it is allocated/deallocated in all units of a chunk * and increments/decrements this count by 1). */ static unsigned long pcpu_nr_populated; /* * Balance work is used to populate or destroy chunks asynchronously. We * try to keep the number of populated free pages between * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one * empty chunk. 
*/ static void pcpu_balance_workfn(struct work_struct *work); static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); static bool pcpu_async_enabled __read_mostly; static bool pcpu_atomic_alloc_failed; static void pcpu_schedule_balance_work(void) { if (pcpu_async_enabled) schedule_work(&pcpu_balance_work); } /** * pcpu_addr_in_chunk - check if the address is served from this chunk * @chunk: chunk of interest * @addr: percpu address * * RETURNS: * True if the address is served from this chunk. */ static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) { void *start_addr, *end_addr; if (!chunk) return false; start_addr = chunk->base_addr + chunk->start_offset; end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - chunk->end_offset; return addr >= start_addr && addr < end_addr; } static int __pcpu_size_to_slot(int size) { int highbit = fls(size); /* size is in bytes */ return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); } static int pcpu_size_to_slot(int size) { if (size == pcpu_unit_size) return pcpu_free_slot; return __pcpu_size_to_slot(size); } static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) { const struct pcpu_block_md *chunk_md = &chunk->chunk_md; if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk_md->contig_hint == 0) return 0; return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE); } /* set the pointer to a chunk in a page struct */ static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) { page->private = (unsigned long)pcpu; } /* obtain pointer to a chunk from a page struct */ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) { return (struct pcpu_chunk *)page->private; } static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) { return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; } static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx) { return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); } static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) { return (unsigned long)chunk->base_addr + pcpu_unit_page_offset(cpu, page_idx); } /* * The following are helper functions to help access bitmaps and convert * between bitmap offsets to address offsets. */ static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) { return chunk->alloc_map + (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); } static unsigned long pcpu_off_to_block_index(int off) { return off / PCPU_BITMAP_BLOCK_BITS; } static unsigned long pcpu_off_to_block_off(int off) { return off & (PCPU_BITMAP_BLOCK_BITS - 1); } static unsigned long pcpu_block_off_to_off(int index, int off) { return index * PCPU_BITMAP_BLOCK_BITS + off; } /** * pcpu_check_block_hint - check against the contig hint * @block: block of interest * @bits: size of allocation * @align: alignment of area (max PAGE_SIZE) * * Check to see if the allocation can fit in the block's contig hint. * Note, a chunk uses the same hints as a block so this can also check against * the chunk's contig hint. */ static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits, size_t align) { int bit_off = ALIGN(block->contig_hint_start, align) - block->contig_hint_start; return bit_off + bits <= block->contig_hint; } /* * pcpu_next_hint - determine which hint to use * @block: block of interest * @alloc_bits: size of allocation * * This determines if we should scan based on the scan_hint or first_free. * In general, we want to scan from first_free to fulfill allocations by * first fit. 
However, if we know a scan_hint at position scan_hint_start * cannot fulfill an allocation, we can begin scanning from there knowing * the contig_hint will be our fallback. */ static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits) { /* * The three conditions below determine if we can skip past the * scan_hint. First, does the scan hint exist. Second, is the * contig_hint after the scan_hint (possibly not true iff * contig_hint == scan_hint). Third, is the allocation request * larger than the scan_hint. */ if (block->scan_hint && block->contig_hint_start > block->scan_hint_start && alloc_bits > block->scan_hint) return block->scan_hint_start + block->scan_hint; return block->first_free; } /** * pcpu_next_md_free_region - finds the next hint free area * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of free area * * Helper function for pcpu_for_each_md_free_region. It checks * block->contig_hint and performs aggregation across blocks to find the * next hint. It modifies bit_off and bits in-place to be consumed in the * loop. */ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, int *bits) { int i = pcpu_off_to_block_index(*bit_off); int block_off = pcpu_off_to_block_off(*bit_off); struct pcpu_block_md *block; *bits = 0; for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); block++, i++) { /* handles contig area across blocks */ if (*bits) { *bits += block->left_free; if (block->left_free == PCPU_BITMAP_BLOCK_BITS) continue; return; } /* * This checks three things. First is there a contig_hint to * check. Second, have we checked this hint before by * comparing the block_off. Third, is this the same as the * right contig hint. In the last case, it spills over into * the next block and should be handled by the contig area * across blocks code. */ *bits = block->contig_hint; if (*bits && block->contig_hint_start >= block_off && *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) { *bit_off = pcpu_block_off_to_off(i, block->contig_hint_start); return; } /* reset to satisfy the second predicate above */ block_off = 0; *bits = block->right_free; *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; } } /** * pcpu_next_fit_region - finds fit areas for a given allocation request * @chunk: chunk of interest * @alloc_bits: size of allocation * @align: alignment of area (max PAGE_SIZE) * @bit_off: chunk offset * @bits: size of free area * * Finds the next free region that is viable for use with a given size and * alignment. This only returns if there is a valid area to be used for this * allocation. block->first_free is returned if the allocation request fits * within the block to see if the request can be fulfilled prior to the contig * hint. */ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, int align, int *bit_off, int *bits) { int i = pcpu_off_to_block_index(*bit_off); int block_off = pcpu_off_to_block_off(*bit_off); struct pcpu_block_md *block; *bits = 0; for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); block++, i++) { /* handles contig area across blocks */ if (*bits) { *bits += block->left_free; if (*bits >= alloc_bits) return; if (block->left_free == PCPU_BITMAP_BLOCK_BITS) continue; } /* check block->contig_hint */ *bits = ALIGN(block->contig_hint_start, align) - block->contig_hint_start; /* * This uses the block offset to determine if this has been * checked in the prior iteration. 
*/ if (block->contig_hint && block->contig_hint_start >= block_off && block->contig_hint >= *bits + alloc_bits) { int start = pcpu_next_hint(block, alloc_bits); *bits += alloc_bits + block->contig_hint_start - start; *bit_off = pcpu_block_off_to_off(i, start); return; } /* reset to satisfy the second predicate above */ block_off = 0; *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, align); *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off; *bit_off = pcpu_block_off_to_off(i, *bit_off); if (*bits >= alloc_bits) return; } /* no valid offsets were found - fail condition */ *bit_off = pcpu_chunk_map_bits(chunk); } /* * Metadata free area iterators. These perform aggregation of free areas * based on the metadata blocks and return the offset @bit_off and size in * bits of the free area @bits. pcpu_for_each_fit_region only returns when * a fit is found for the allocation request. */ #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \ (bit_off) < pcpu_chunk_map_bits((chunk)); \ (bit_off) += (bits) + 1, \ pcpu_next_md_free_region((chunk), &(bit_off), &(bits))) #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ &(bits)); \ (bit_off) < pcpu_chunk_map_bits((chunk)); \ (bit_off) += (bits), \ pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ &(bits))) /** * pcpu_mem_zalloc - allocate memory * @size: bytes to allocate * @gfp: allocation flags * * Allocate @size bytes. If @size is smaller than PAGE_SIZE, * kzalloc() is used; otherwise, the equivalent of vzalloc() is used. * This is to facilitate passing through whitelisted flags. The * returned memory is always zeroed. * * RETURNS: * Pointer to the allocated area on success, NULL on failure. */ static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) { if (WARN_ON_ONCE(!slab_is_available())) return NULL; if (size <= PAGE_SIZE) return kzalloc(size, gfp); else return __vmalloc(size, gfp | __GFP_ZERO); } /** * pcpu_mem_free - free memory * @ptr: memory to free * * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). */ static void pcpu_mem_free(void *ptr) { kvfree(ptr); } static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, bool move_front) { if (chunk != pcpu_reserved_chunk) { if (move_front) list_move(&chunk->list, &pcpu_chunk_lists[slot]); else list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]); } } static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) { __pcpu_chunk_move(chunk, slot, true); } /** * pcpu_chunk_relocate - put chunk in the appropriate chunk slot * @chunk: chunk of interest * @oslot: the previous slot it was on * * This function is called after an allocation or free changed @chunk. * New slot according to the changed state is determined and @chunk is * moved to the slot. Note that the reserved chunk is never put on * chunk slots. * * CONTEXT: * pcpu_lock. 
*/ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) { int nslot = pcpu_chunk_slot(chunk); /* leave isolated chunks in-place */ if (chunk->isolated) return; if (oslot != nslot) __pcpu_chunk_move(chunk, nslot, oslot < nslot); } static void pcpu_isolate_chunk(struct pcpu_chunk *chunk) { lockdep_assert_held(&pcpu_lock); if (!chunk->isolated) { chunk->isolated = true; pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages; } list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]); } static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk) { lockdep_assert_held(&pcpu_lock); if (chunk->isolated) { chunk->isolated = false; pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages; pcpu_chunk_relocate(chunk, -1); } } /* * pcpu_update_empty_pages - update empty page counters * @chunk: chunk of interest * @nr: nr of empty pages * * This is used to keep track of the empty pages now based on the premise * a md_block covers a page. The hint update functions recognize if a block * is made full or broken to calculate deltas for keeping track of free pages. */ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) { chunk->nr_empty_pop_pages += nr; if (chunk != pcpu_reserved_chunk && !chunk->isolated) pcpu_nr_empty_pop_pages += nr; } /* * pcpu_region_overlap - determines if two regions overlap * @a: start of first region, inclusive * @b: end of first region, exclusive * @x: start of second region, inclusive * @y: end of second region, exclusive * * This is used to determine if the hint region [a, b) overlaps with the * allocated region [x, y). */ static inline bool pcpu_region_overlap(int a, int b, int x, int y) { return (a < y) && (x < b); } /** * pcpu_block_update - updates a block given a free area * @block: block of interest * @start: start offset in block * @end: end offset in block * * Updates a block given a known free area. The region [start, end) is * expected to be the entirety of the free area within a block. Chooses * the best starting offset if the contig hints are equal. */ static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) { int contig = end - start; block->first_free = min(block->first_free, start); if (start == 0) block->left_free = contig; if (end == block->nr_bits) block->right_free = contig; if (contig > block->contig_hint) { /* promote the old contig_hint to be the new scan_hint */ if (start > block->contig_hint_start) { if (block->contig_hint > block->scan_hint) { block->scan_hint_start = block->contig_hint_start; block->scan_hint = block->contig_hint; } else if (start < block->scan_hint_start) { /* * The old contig_hint == scan_hint. But, the * new contig is larger so hold the invariant * scan_hint_start < contig_hint_start. */ block->scan_hint = 0; } } else { block->scan_hint = 0; } block->contig_hint_start = start; block->contig_hint = contig; } else if (contig == block->contig_hint) { if (block->contig_hint_start && (!start || __ffs(start) > __ffs(block->contig_hint_start))) { /* start has a better alignment so use it */ block->contig_hint_start = start; if (start < block->scan_hint_start && block->contig_hint > block->scan_hint) block->scan_hint = 0; } else if (start > block->scan_hint_start || block->contig_hint > block->scan_hint) { /* * Knowing contig == contig_hint, update the scan_hint * if it is farther than or larger than the current * scan_hint. */ block->scan_hint_start = start; block->scan_hint = contig; } } else { /* * The region is smaller than the contig_hint. 
So only update * the scan_hint if it is larger than or equal and farther than * the current scan_hint. */ if ((start < block->contig_hint_start && (contig > block->scan_hint || (contig == block->scan_hint && start > block->scan_hint_start)))) { block->scan_hint_start = start; block->scan_hint = contig; } } } /* * pcpu_block_update_scan - update a block given a free area from a scan * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of free area * * Finding the final allocation spot first goes through pcpu_find_block_fit() * to find a block that can hold the allocation and then pcpu_alloc_area() * where a scan is used. When allocations require specific alignments, * we can inadvertently create holes which will not be seen in the alloc * or free paths. * * This takes a given free area hole and updates a block as it may change the * scan_hint. We need to scan backwards to ensure we don't miss free bits * from alignment. */ static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off, int bits) { int s_off = pcpu_off_to_block_off(bit_off); int e_off = s_off + bits; int s_index, l_bit; struct pcpu_block_md *block; if (e_off > PCPU_BITMAP_BLOCK_BITS) return; s_index = pcpu_off_to_block_index(bit_off); block = chunk->md_blocks + s_index; /* scan backwards in case of alignment skipping free bits */ l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off); s_off = (s_off == l_bit) ? 0 : l_bit + 1; pcpu_block_update(block, s_off, e_off); } /** * pcpu_chunk_refresh_hint - updates metadata about a chunk * @chunk: chunk of interest * @full_scan: if we should scan from the beginning * * Iterates over the metadata blocks to find the largest contig area. * A full scan can be avoided on the allocation path as this is triggered * if we broke the contig_hint. In doing so, the scan_hint will be before * the contig_hint or after if the scan_hint == contig_hint. This cannot * be prevented on freeing as we want to find the largest area possibly * spanning blocks. */ static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int bit_off, bits; /* promote scan_hint to contig_hint */ if (!full_scan && chunk_md->scan_hint) { bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint; chunk_md->contig_hint_start = chunk_md->scan_hint_start; chunk_md->contig_hint = chunk_md->scan_hint; chunk_md->scan_hint = 0; } else { bit_off = chunk_md->first_free; chunk_md->contig_hint = 0; } bits = 0; pcpu_for_each_md_free_region(chunk, bit_off, bits) pcpu_block_update(chunk_md, bit_off, bit_off + bits); } /** * pcpu_block_refresh_hint * @chunk: chunk of interest * @index: index of the metadata block * * Scans over the block beginning at first_free and updates the block * metadata accordingly. 
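 *
 * Conceptually this is a "longest run of clear bits" recomputation. A
 * self-contained userspace sketch of the same idea (plain byte array
 * instead of the kernel bitmap helpers; the name is illustrative):
 *
 *	// length of the longest run of zeros in map[0..nr_bits)
 *	static int demo_largest_free_run(const unsigned char *map, int nr_bits)
 *	{
 *		int i, run = 0, best = 0;
 *
 *		for (i = 0; i < nr_bits; i++) {
 *			run = map[i] ? 0 : run + 1;
 *			if (run > best)
 *				best = run;
 *		}
 *		return best;
 *	}
 *
 * The real refresh also records where that run starts (contig_hint_start)
 * and how much free space touches each edge of the block (left_free and
 * right_free), which the fit iterators use to skip hopeless blocks.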
*/ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) { struct pcpu_block_md *block = chunk->md_blocks + index; unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); unsigned int start, end; /* region start, region end */ /* promote scan_hint to contig_hint */ if (block->scan_hint) { start = block->scan_hint_start + block->scan_hint; block->contig_hint_start = block->scan_hint_start; block->contig_hint = block->scan_hint; block->scan_hint = 0; } else { start = block->first_free; block->contig_hint = 0; } block->right_free = 0; /* iterate over free areas and update the contig hints */ for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS) pcpu_block_update(block, start, end); } /** * pcpu_block_update_hint_alloc - update hint on allocation path * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of request * * Updates metadata for the allocation path. The metadata only has to be * refreshed by a full scan iff the chunk's contig hint is broken. Block level * scans are required if the block's contig hint is broken. */ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, int bits) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int nr_empty_pages = 0; struct pcpu_block_md *s_block, *e_block, *block; int s_index, e_index; /* block indexes of the freed allocation */ int s_off, e_off; /* block offsets of the freed allocation */ /* * Calculate per block offsets. * The calculation uses an inclusive range, but the resulting offsets * are [start, end). e_index always points to the last block in the * range. */ s_index = pcpu_off_to_block_index(bit_off); e_index = pcpu_off_to_block_index(bit_off + bits - 1); s_off = pcpu_off_to_block_off(bit_off); e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; s_block = chunk->md_blocks + s_index; e_block = chunk->md_blocks + e_index; /* * Update s_block. */ if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; /* * block->first_free must be updated if the allocation takes its place. * If the allocation breaks the contig_hint, a scan is required to * restore this hint. */ if (s_off == s_block->first_free) s_block->first_free = find_next_zero_bit( pcpu_index_alloc_map(chunk, s_index), PCPU_BITMAP_BLOCK_BITS, s_off + bits); if (pcpu_region_overlap(s_block->scan_hint_start, s_block->scan_hint_start + s_block->scan_hint, s_off, s_off + bits)) s_block->scan_hint = 0; if (pcpu_region_overlap(s_block->contig_hint_start, s_block->contig_hint_start + s_block->contig_hint, s_off, s_off + bits)) { /* block contig hint is broken - scan to fix it */ if (!s_off) s_block->left_free = 0; pcpu_block_refresh_hint(chunk, s_index); } else { /* update left and right contig manually */ s_block->left_free = min(s_block->left_free, s_off); if (s_index == e_index) s_block->right_free = min_t(int, s_block->right_free, PCPU_BITMAP_BLOCK_BITS - e_off); else s_block->right_free = 0; } /* * Update e_block. */ if (s_index != e_index) { if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; /* * When the allocation is across blocks, the end is along * the left part of the e_block. 
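 *
 * A worked example with illustrative numbers (assuming, purely for the
 * example, that PCPU_BITMAP_BLOCK_BITS is 1024): an allocation with
 * bit_off = 1000 and bits = 100 ends at bit 1099, so s_index = 0,
 * e_index = 1, s_off = 1000 and e_off = 76. Its first 24 bits sit in the
 * right edge of block 0 and the remaining 76 bits cover the left edge of
 * block 1, which is why only e_block's left-hand metadata is touched here.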
*/ e_block->first_free = find_next_zero_bit( pcpu_index_alloc_map(chunk, e_index), PCPU_BITMAP_BLOCK_BITS, e_off); if (e_off == PCPU_BITMAP_BLOCK_BITS) { /* reset the block */ e_block++; } else { if (e_off > e_block->scan_hint_start) e_block->scan_hint = 0; e_block->left_free = 0; if (e_off > e_block->contig_hint_start) { /* contig hint is broken - scan to fix it */ pcpu_block_refresh_hint(chunk, e_index); } else { e_block->right_free = min_t(int, e_block->right_free, PCPU_BITMAP_BLOCK_BITS - e_off); } } /* update in-between md_blocks */ nr_empty_pages += (e_index - s_index - 1); for (block = s_block + 1; block < e_block; block++) { block->scan_hint = 0; block->contig_hint = 0; block->left_free = 0; block->right_free = 0; } } /* * If the allocation is not atomic, some blocks may not be * populated with pages, while we account it here. The number * of pages will be added back with pcpu_chunk_populated() * when populating pages. */ if (nr_empty_pages) pcpu_update_empty_pages(chunk, -nr_empty_pages); if (pcpu_region_overlap(chunk_md->scan_hint_start, chunk_md->scan_hint_start + chunk_md->scan_hint, bit_off, bit_off + bits)) chunk_md->scan_hint = 0; /* * The only time a full chunk scan is required is if the chunk * contig hint is broken. Otherwise, it means a smaller space * was used and therefore the chunk contig hint is still correct. */ if (pcpu_region_overlap(chunk_md->contig_hint_start, chunk_md->contig_hint_start + chunk_md->contig_hint, bit_off, bit_off + bits)) pcpu_chunk_refresh_hint(chunk, false); } /** * pcpu_block_update_hint_free - updates the block hints on the free path * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of request * * Updates metadata for the allocation path. This avoids a blind block * refresh by making use of the block contig hints. If this fails, it scans * forward and backward to determine the extent of the free area. This is * capped at the boundary of blocks. * * A chunk update is triggered if a page becomes free, a block becomes free, * or the free spans across blocks. This tradeoff is to minimize iterating * over the block metadata to update chunk_md->contig_hint. * chunk_md->contig_hint may be off by up to a page, but it will never be more * than the available space. If the contig hint is contained in one block, it * will be accurate. */ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, int bits) { int nr_empty_pages = 0; struct pcpu_block_md *s_block, *e_block, *block; int s_index, e_index; /* block indexes of the freed allocation */ int s_off, e_off; /* block offsets of the freed allocation */ int start, end; /* start and end of the whole free area */ /* * Calculate per block offsets. * The calculation uses an inclusive range, but the resulting offsets * are [start, end). e_index always points to the last block in the * range. */ s_index = pcpu_off_to_block_index(bit_off); e_index = pcpu_off_to_block_index(bit_off + bits - 1); s_off = pcpu_off_to_block_off(bit_off); e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; s_block = chunk->md_blocks + s_index; e_block = chunk->md_blocks + e_index; /* * Check if the freed area aligns with the block->contig_hint. * If it does, then the scan to find the beginning/end of the * larger free area can be avoided. * * start and end refer to beginning and end of the free area * within each their respective blocks. This is not necessarily * the entire free area as it may span blocks past the beginning * or end of the block. 
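 *
 * For example (illustrative numbers): if the freed area begins at
 * s_off = 100 while bits 80..99 are already free and bit 79 is still
 * allocated, find_last_bit() over [0, 100) returns 79 and start is pulled
 * back to 80, so the freed area is merged with the free area immediately
 * to its left before the block hints are updated.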
*/ start = s_off; if (s_off == s_block->contig_hint + s_block->contig_hint_start) { start = s_block->contig_hint_start; } else { /* * Scan backwards to find the extent of the free area. * find_last_bit returns the starting bit, so if the start bit * is returned, that means there was no last bit and the * remainder of the chunk is free. */ int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), start); start = (start == l_bit) ? 0 : l_bit + 1; } end = e_off; if (e_off == e_block->contig_hint_start) end = e_block->contig_hint_start + e_block->contig_hint; else end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), PCPU_BITMAP_BLOCK_BITS, end); /* update s_block */ e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS; if (!start && e_off == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; pcpu_block_update(s_block, start, e_off); /* freeing in the same block */ if (s_index != e_index) { /* update e_block */ if (end == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; pcpu_block_update(e_block, 0, end); /* reset md_blocks in the middle */ nr_empty_pages += (e_index - s_index - 1); for (block = s_block + 1; block < e_block; block++) { block->first_free = 0; block->scan_hint = 0; block->contig_hint_start = 0; block->contig_hint = PCPU_BITMAP_BLOCK_BITS; block->left_free = PCPU_BITMAP_BLOCK_BITS; block->right_free = PCPU_BITMAP_BLOCK_BITS; } } if (nr_empty_pages) pcpu_update_empty_pages(chunk, nr_empty_pages); /* * Refresh chunk metadata when the free makes a block free or spans * across blocks. The contig_hint may be off by up to a page, but if * the contig_hint is contained in a block, it will be accurate with * the else condition below. */ if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index) pcpu_chunk_refresh_hint(chunk, true); else pcpu_block_update(&chunk->chunk_md, pcpu_block_off_to_off(s_index, start), end); } /** * pcpu_is_populated - determines if the region is populated * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of area * @next_off: return value for the next offset to start searching * * For atomic allocations, check if the backing pages are populated. * * RETURNS: * Bool if the backing pages are populated. * next_index is to skip over unpopulated blocks in pcpu_find_block_fit. */ static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, int *next_off) { unsigned int start, end; start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); start = find_next_zero_bit(chunk->populated, end, start); if (start >= end) return true; end = find_next_bit(chunk->populated, end, start + 1); *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; return false; } /** * pcpu_find_block_fit - finds the block index to start searching * @chunk: chunk of interest * @alloc_bits: size of request in allocation units * @align: alignment of area (max PAGE_SIZE bytes) * @pop_only: use populated regions only * * Given a chunk and an allocation spec, find the offset to begin searching * for a free region. This iterates over the bitmap metadata blocks to * find an offset that will be guaranteed to fit the requirements. It is * not quite first fit as if the allocation does not fit in the contig hint * of a block or chunk, it is skipped. This errs on the side of caution * to prevent excess iteration. Poor alignment can cause the allocator to * skip over blocks and chunks that have valid free areas. * * RETURNS: * The offset in the bitmap to begin searching. * -1 if no offset is found. 
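 *
 * The strategy is essentially "first fit, but skip any block whose
 * recorded largest-free-run hint already rules it out". A self-contained
 * userspace sketch of that pruning (names and layout are illustrative,
 * not the kernel's):
 *
 *	struct demo_block { int largest_free; int start_off; };
 *
 *	// offset of the first block worth scanning, or -1 if none can fit
 *	static int demo_find_fit(const struct demo_block *b, int nr, int need)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nr; i++)
 *			if (b[i].largest_free >= need)
 *				return b[i].start_off;
 *		return -1;
 *	}
 *
 * The real iterator additionally honours @align and, for atomic requests,
 * skips offsets whose backing pages are not yet populated.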
*/ static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, size_t align, bool pop_only) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int bit_off, bits, next_off; /* * This is an optimization to prevent scanning by assuming if the * allocation cannot fit in the global hint, there is memory pressure * and creating a new chunk would happen soon. */ if (!pcpu_check_block_hint(chunk_md, alloc_bits, align)) return -1; bit_off = pcpu_next_hint(chunk_md, alloc_bits); bits = 0; pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, &next_off)) break; bit_off = next_off; bits = 0; } if (bit_off == pcpu_chunk_map_bits(chunk)) return -1; return bit_off; } /* * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() * @map: the address to base the search on * @size: the bitmap size in bits * @start: the bitnumber to start searching at * @nr: the number of zeroed bits we're looking for * @align_mask: alignment mask for zero area * @largest_off: offset of the largest area skipped * @largest_bits: size of the largest area skipped * * The @align_mask should be one less than a power of 2. * * This is a modified version of bitmap_find_next_zero_area_off() to remember * the largest area that was skipped. This is imperfect, but in general is * good enough. The largest remembered region is the largest failed region * seen. This does not include anything we possibly skipped due to alignment. * pcpu_block_update_scan() does scan backwards to try and recover what was * lost to alignment. While this can cause scanning to miss earlier possible * free areas, smaller allocations will eventually fill those holes. */ static unsigned long pcpu_find_zero_area(unsigned long *map, unsigned long size, unsigned long start, unsigned long nr, unsigned long align_mask, unsigned long *largest_off, unsigned long *largest_bits) { unsigned long index, end, i, area_off, area_bits; again: index = find_next_zero_bit(map, size, start); /* Align allocation */ index = __ALIGN_MASK(index, align_mask); area_off = index; end = index + nr; if (end > size) return end; i = find_next_bit(map, end, index); if (i < end) { area_bits = i - area_off; /* remember largest unused area with best alignment */ if (area_bits > *largest_bits || (area_bits == *largest_bits && *largest_off && (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { *largest_off = area_off; *largest_bits = area_bits; } start = i + 1; goto again; } return index; } /** * pcpu_alloc_area - allocates an area from a pcpu_chunk * @chunk: chunk of interest * @alloc_bits: size of request in allocation units * @align: alignment of area (max PAGE_SIZE) * @start: bit_off to start searching * * This function takes in a @start offset to begin searching to fit an * allocation of @alloc_bits with alignment @align. It needs to scan * the allocation map because if it fits within the block's contig hint, * @start will be block->first_free. This is an attempt to fill the * allocation prior to breaking the contig hint. The allocation and * boundary maps are updated accordingly if it confirms a valid * free area. * * RETURNS: * Allocated addr offset in @chunk on success. * -1 if no matching area is found. */ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, size_t align, int start) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; size_t align_mask = (align) ? 
(align - 1) : 0; unsigned long area_off = 0, area_bits = 0; int bit_off, end, oslot; lockdep_assert_held(&pcpu_lock); oslot = pcpu_chunk_slot(chunk); /* * Search to find a fit. */ end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, pcpu_chunk_map_bits(chunk)); bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, align_mask, &area_off, &area_bits); if (bit_off >= end) return -1; if (area_bits) pcpu_block_update_scan(chunk, area_off, area_bits); /* update alloc map */ bitmap_set(chunk->alloc_map, bit_off, alloc_bits); /* update boundary map */ set_bit(bit_off, chunk->bound_map); bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); set_bit(bit_off + alloc_bits, chunk->bound_map); chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; /* update first free bit */ if (bit_off == chunk_md->first_free) chunk_md->first_free = find_next_zero_bit( chunk->alloc_map, pcpu_chunk_map_bits(chunk), bit_off + alloc_bits); pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); pcpu_chunk_relocate(chunk, oslot); return bit_off * PCPU_MIN_ALLOC_SIZE; } /** * pcpu_free_area - frees the corresponding offset * @chunk: chunk of interest * @off: addr offset into chunk * * This function determines the size of an allocation to free using * the boundary bitmap and clears the allocation map. * * RETURNS: * Number of freed bytes. */ static int pcpu_free_area(struct pcpu_chunk *chunk, int off) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int bit_off, bits, end, oslot, freed; lockdep_assert_held(&pcpu_lock); pcpu_stats_area_dealloc(chunk); oslot = pcpu_chunk_slot(chunk); bit_off = off / PCPU_MIN_ALLOC_SIZE; /* find end index */ end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), bit_off + 1); bits = end - bit_off; bitmap_clear(chunk->alloc_map, bit_off, bits); freed = bits * PCPU_MIN_ALLOC_SIZE; /* update metadata */ chunk->free_bytes += freed; /* update first free bit */ chunk_md->first_free = min(chunk_md->first_free, bit_off); pcpu_block_update_hint_free(chunk, bit_off, bits); pcpu_chunk_relocate(chunk, oslot); return freed; } static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) { block->scan_hint = 0; block->contig_hint = nr_bits; block->left_free = nr_bits; block->right_free = nr_bits; block->first_free = 0; block->nr_bits = nr_bits; } static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) { struct pcpu_block_md *md_block; /* init the chunk's block */ pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); for (md_block = chunk->md_blocks; md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); md_block++) pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); } /** * pcpu_alloc_first_chunk - creates chunks that serve the first chunk * @tmp_addr: the start of the region served * @map_size: size of the region served * * This is responsible for creating the chunks that serve the first chunk. The * base_addr is page aligned down of @tmp_addr while the region end is page * aligned up. Offsets are kept track of to determine the region served. All * this is done to appease the bitmap allocator in avoiding partial blocks. * * RETURNS: * Chunk serving the region at @tmp_addr of @map_size. 
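 *
 * A worked example (assuming 4 KiB pages; addresses are made up): with
 * tmp_addr = 0x100340 and map_size = 0x7000, aligned_addr is 0x100000,
 * start_offset is 0x340 and region_size is ALIGN(0x340 + 0x7000, 0x1000)
 * = 0x8000, so end_offset = 0x8000 - 0x340 - 0x7000 = 0xcc0 and
 * nr_pages = 8. The 0x340 head and 0xcc0 tail are then hidden from the
 * allocation map below so only the served region is ever handed out.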
*/ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, int map_size) { struct pcpu_chunk *chunk; unsigned long aligned_addr; int start_offset, offset_bits, region_size, region_bits; size_t alloc_size; /* region calculations */ aligned_addr = tmp_addr & PAGE_MASK; start_offset = tmp_addr - aligned_addr; region_size = ALIGN(start_offset + map_size, PAGE_SIZE); /* allocate chunk */ alloc_size = struct_size(chunk, populated, BITS_TO_LONGS(region_size >> PAGE_SHIFT)); chunk = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); INIT_LIST_HEAD(&chunk->list); chunk->base_addr = (void *)aligned_addr; chunk->start_offset = start_offset; chunk->end_offset = region_size - chunk->start_offset - map_size; chunk->nr_pages = region_size >> PAGE_SHIFT; region_bits = pcpu_chunk_map_bits(chunk); alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); chunk->alloc_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); chunk->bound_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); chunk->md_blocks = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); #ifdef NEED_PCPUOBJ_EXT /* first chunk is free to use */ chunk->obj_exts = NULL; #endif pcpu_init_md_blocks(chunk); /* manage populated page bitmap */ chunk->immutable = true; bitmap_fill(chunk->populated, chunk->nr_pages); chunk->nr_populated = chunk->nr_pages; chunk->nr_empty_pop_pages = chunk->nr_pages; chunk->free_bytes = map_size; if (chunk->start_offset) { /* hide the beginning of the bitmap */ offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; bitmap_set(chunk->alloc_map, 0, offset_bits); set_bit(0, chunk->bound_map); set_bit(offset_bits, chunk->bound_map); chunk->chunk_md.first_free = offset_bits; pcpu_block_update_hint_alloc(chunk, 0, offset_bits); } if (chunk->end_offset) { /* hide the end of the bitmap */ offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; bitmap_set(chunk->alloc_map, pcpu_chunk_map_bits(chunk) - offset_bits, offset_bits); set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, chunk->bound_map); set_bit(region_bits, chunk->bound_map); pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) - offset_bits, offset_bits); } return chunk; } static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) { struct pcpu_chunk *chunk; int region_bits; chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); if (!chunk) return NULL; INIT_LIST_HEAD(&chunk->list); chunk->nr_pages = pcpu_unit_pages; region_bits = pcpu_chunk_map_bits(chunk); chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]), gfp); if (!chunk->alloc_map) goto alloc_map_fail; chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]), gfp); if (!chunk->bound_map) goto bound_map_fail; chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]), gfp); if (!chunk->md_blocks) goto md_blocks_fail; #ifdef NEED_PCPUOBJ_EXT if (need_pcpuobj_ext()) { chunk->obj_exts = pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * sizeof(struct pcpuobj_ext), gfp); if (!chunk->obj_exts) goto objcg_fail; } #endif pcpu_init_md_blocks(chunk); /* init metadata */ chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; return chunk; #ifdef NEED_PCPUOBJ_EXT objcg_fail: pcpu_mem_free(chunk->md_blocks); #endif md_blocks_fail: pcpu_mem_free(chunk->bound_map); bound_map_fail: pcpu_mem_free(chunk->alloc_map); 
alloc_map_fail: pcpu_mem_free(chunk); return NULL; } static void pcpu_free_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; #ifdef NEED_PCPUOBJ_EXT pcpu_mem_free(chunk->obj_exts); #endif pcpu_mem_free(chunk->md_blocks); pcpu_mem_free(chunk->bound_map); pcpu_mem_free(chunk->alloc_map); pcpu_mem_free(chunk); } /** * pcpu_chunk_populated - post-population bookkeeping * @chunk: pcpu_chunk which got populated * @page_start: the start page * @page_end: the end page * * Pages in [@page_start,@page_end) have been populated to @chunk. Update * the bookkeeping information accordingly. Must be called after each * successful population. */ static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_set(chunk->populated, page_start, nr); chunk->nr_populated += nr; pcpu_nr_populated += nr; pcpu_update_empty_pages(chunk, nr); } /** * pcpu_chunk_depopulated - post-depopulation bookkeeping * @chunk: pcpu_chunk which got depopulated * @page_start: the start page * @page_end: the end page * * Pages in [@page_start,@page_end) have been depopulated from @chunk. * Update the bookkeeping information accordingly. Must be called after * each successful depopulation. */ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_clear(chunk->populated, page_start, nr); chunk->nr_populated -= nr; pcpu_nr_populated -= nr; pcpu_update_empty_pages(chunk, -nr); } /* * Chunk management implementation. * * To allow different implementations, chunk alloc/free and * [de]population are implemented in a separate file which is pulled * into this file and compiled together. The following functions * should be implemented. * * pcpu_populate_chunk - populate the specified range of a chunk * pcpu_depopulate_chunk - depopulate the specified range of a chunk * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk * pcpu_create_chunk - create a new chunk * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop * pcpu_addr_to_page - translate address to physical address * pcpu_verify_alloc_info - check alloc_info is acceptable during init */ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp); static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end); static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end); static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp); static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); static struct page *pcpu_addr_to_page(void *addr); static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); #ifdef CONFIG_NEED_PER_CPU_KM #include "percpu-km.c" #else #include "percpu-vm.c" #endif /** * pcpu_chunk_addr_search - determine chunk containing specified address * @addr: address for which the chunk needs to be determined. * * This is an internal function that handles all but static allocations. * Static percpu address values should never be passed into the allocator. * * RETURNS: * The address of the found chunk. */ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) { /* is it in the dynamic region (first chunk)? */ if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) return pcpu_first_chunk; /* is it in the reserved region? 
*/ if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) return pcpu_reserved_chunk; /* * The address is relative to unit0 which might be unused and * thus unmapped. Offset the address to the unit space of the * current processor before looking it up in the vmalloc * space. Note that any possible cpu id can be used here, so * there's no need to worry about preemption or cpu hotplug. */ addr += pcpu_unit_offsets[raw_smp_processor_id()]; return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); } #ifdef CONFIG_MEMCG static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) { struct obj_cgroup *objcg; if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT)) return true; objcg = current_obj_cgroup(); if (!objcg) return true; if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) return false; *objcgp = objcg; return true; } static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, struct pcpu_chunk *chunk, int off, size_t size) { if (!objcg) return; if (likely(chunk && chunk->obj_exts)) { obj_cgroup_get(objcg); chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg; rcu_read_lock(); mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, pcpu_obj_full_size(size)); rcu_read_unlock(); } else { obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size)); } } static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) { struct obj_cgroup *objcg; if (unlikely(!chunk->obj_exts)) return; objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup; if (!objcg) return; chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL; obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size)); rcu_read_lock(); mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, -pcpu_obj_full_size(size)); rcu_read_unlock(); obj_cgroup_put(objcg); } #else /* CONFIG_MEMCG */ static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) { return true; } static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, struct pcpu_chunk *chunk, int off, size_t size) { } static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) { } #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MEM_ALLOC_PROFILING static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off, size_t size) { if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) { alloc_tag_add(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, current->alloc_tag, size); } } static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size) { if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) alloc_tag_sub(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, size); } #else static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off, size_t size) { } static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size) { } #endif /** * pcpu_alloc - the percpu allocator * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * @reserved: allocate from the reserved chunk if available * @gfp: allocation flags * * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN * then no warning will be triggered on invalid or failed allocation * requests. * * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. 
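 *
 * Internally the request is expressed in PCPU_MIN_ALLOC_SIZE granules:
 * @size is rounded up to a multiple of the granule and @align is raised
 * to at least the granule before both are converted into bit counts for
 * the bitmap allocator. For example, with a 4-byte granule (the usual
 * value), a 6-byte request becomes 8 bytes, i.e. bits = 2, and any @align
 * below 4 is treated as 4.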
*/ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved, gfp_t gfp) { gfp_t pcpu_gfp; bool is_atomic; bool do_warn; struct obj_cgroup *objcg = NULL; static int warn_limit = 10; struct pcpu_chunk *chunk, *next; const char *err; int slot, off, cpu, ret; unsigned long flags; void __percpu *ptr; size_t bits, bit_align; gfp = current_gfp_context(gfp); /* whitelisted flags that can be passed to the backing allocators */ pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; do_warn = !(gfp & __GFP_NOWARN); /* * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, * therefore alignment must be a minimum of that many bytes. * An allocation may have internal fragmentation from rounding up * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. */ if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) align = PCPU_MIN_ALLOC_SIZE; size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); bits = size >> PCPU_MIN_ALLOC_SHIFT; bit_align = align >> PCPU_MIN_ALLOC_SHIFT; if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || !is_power_of_2(align))) { WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", size, align); return NULL; } if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg))) return NULL; if (!is_atomic) { /* * pcpu_balance_workfn() allocates memory under this mutex, * and it may wait for memory reclaim. Allow current task * to become OOM victim, in case of memory pressure. */ if (gfp & __GFP_NOFAIL) { mutex_lock(&pcpu_alloc_mutex); } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); return NULL; } } spin_lock_irqsave(&pcpu_lock, flags); /* serve reserved allocations from the reserved chunk if available */ if (reserved && pcpu_reserved_chunk) { chunk = pcpu_reserved_chunk; off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); if (off < 0) { err = "alloc from reserved chunk failed"; goto fail_unlock; } off = pcpu_alloc_area(chunk, bits, bit_align, off); if (off >= 0) goto area_found; err = "alloc from reserved chunk failed"; goto fail_unlock; } restart: /* search through normal chunks */ for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) { list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], list) { off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); if (off < 0) { if (slot < PCPU_SLOT_FAIL_THRESHOLD) pcpu_chunk_move(chunk, 0); continue; } off = pcpu_alloc_area(chunk, bits, bit_align, off); if (off >= 0) { pcpu_reintegrate_chunk(chunk); goto area_found; } } } spin_unlock_irqrestore(&pcpu_lock, flags); if (is_atomic) { err = "atomic alloc failed, no space left"; goto fail; } /* No space left. Create a new chunk. 
*/ if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { chunk = pcpu_create_chunk(pcpu_gfp); if (!chunk) { err = "failed to allocate new chunk"; goto fail; } spin_lock_irqsave(&pcpu_lock, flags); pcpu_chunk_relocate(chunk, -1); } else { spin_lock_irqsave(&pcpu_lock, flags); } goto restart; area_found: pcpu_stats_area_alloc(chunk, size); if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) pcpu_schedule_balance_work(); spin_unlock_irqrestore(&pcpu_lock, flags); /* populate if not all pages are already there */ if (!is_atomic) { unsigned int page_end, rs, re; rs = PFN_DOWN(off); page_end = PFN_UP(off + size); for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) { WARN_ON(chunk->immutable); ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); spin_lock_irqsave(&pcpu_lock, flags); if (ret) { pcpu_free_area(chunk, off); err = "failed to populate"; goto fail_unlock; } pcpu_chunk_populated(chunk, rs, re); spin_unlock_irqrestore(&pcpu_lock, flags); } mutex_unlock(&pcpu_alloc_mutex); } /* clear the areas and return address relative to base address */ for_each_possible_cpu(cpu) memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); kmemleak_alloc_percpu(ptr, size, gfp); trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align, chunk->base_addr, off, ptr, pcpu_obj_full_size(size), gfp); pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); pcpu_alloc_tag_alloc_hook(chunk, off, size); return ptr; fail_unlock: spin_unlock_irqrestore(&pcpu_lock, flags); fail: trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); if (do_warn && warn_limit) { pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", size, align, is_atomic, err); if (!is_atomic) dump_stack(); if (!--warn_limit) pr_info("limit reached, disable warning\n"); } if (is_atomic) { /* see the flag handling in pcpu_balance_workfn() */ pcpu_atomic_alloc_failed = true; pcpu_schedule_balance_work(); } else { mutex_unlock(&pcpu_alloc_mutex); } pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); return NULL; } EXPORT_SYMBOL_GPL(pcpu_alloc_noprof); /** * pcpu_balance_free - manage the amount of free chunks * @empty_only: free chunks only if there are no populated pages * * If empty_only is %false, reclaim all fully free chunks regardless of the * number of populated pages. Otherwise, only reclaim chunks that have no * populated pages. * * CONTEXT: * pcpu_lock (can be dropped temporarily) */ static void pcpu_balance_free(bool empty_only) { LIST_HEAD(to_free); struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot]; struct pcpu_chunk *chunk, *next; lockdep_assert_held(&pcpu_lock); /* * There's no reason to keep around multiple unused chunks and VM * areas can be scarce. Destroy all free chunks except for one. 
*/ list_for_each_entry_safe(chunk, next, free_head, list) { WARN_ON(chunk->immutable); /* spare the first one */ if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) continue; if (!empty_only || chunk->nr_empty_pop_pages == 0) list_move(&chunk->list, &to_free); } if (list_empty(&to_free)) return; spin_unlock_irq(&pcpu_lock); list_for_each_entry_safe(chunk, next, &to_free, list) { unsigned int rs, re; for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) { pcpu_depopulate_chunk(chunk, rs, re); spin_lock_irq(&pcpu_lock); pcpu_chunk_depopulated(chunk, rs, re); spin_unlock_irq(&pcpu_lock); } pcpu_destroy_chunk(chunk); cond_resched(); } spin_lock_irq(&pcpu_lock); } /** * pcpu_balance_populated - manage the amount of populated pages * * Maintain a certain amount of populated pages to satisfy atomic allocations. * It is possible that this is called when physical memory is scarce causing * OOM killer to be triggered. We should avoid doing so until an actual * allocation causes the failure as it is possible that requests can be * serviced from already backed regions. * * CONTEXT: * pcpu_lock (can be dropped temporarily) */ static void pcpu_balance_populated(void) { /* gfp flags passed to underlying allocators */ const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; struct pcpu_chunk *chunk; int slot, nr_to_pop, ret; lockdep_assert_held(&pcpu_lock); /* * Ensure there are certain number of free populated pages for * atomic allocs. Fill up from the most packed so that atomic * allocs don't increase fragmentation. If atomic allocation * failed previously, always populate the maximum amount. This * should prevent atomic allocs larger than PAGE_SIZE from keeping * failing indefinitely; however, large atomic allocs are not * something we support properly and can be highly unreliable and * inefficient. */ retry_pop: if (pcpu_atomic_alloc_failed) { nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; /* best effort anyway, don't worry about synchronization */ pcpu_atomic_alloc_failed = false; } else { nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - pcpu_nr_empty_pop_pages, 0, PCPU_EMPTY_POP_PAGES_HIGH); } for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) { unsigned int nr_unpop = 0, rs, re; if (!nr_to_pop) break; list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { nr_unpop = chunk->nr_pages - chunk->nr_populated; if (nr_unpop) break; } if (!nr_unpop) continue; /* @chunk can't go away while pcpu_alloc_mutex is held */ for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) { int nr = min_t(int, re - rs, nr_to_pop); spin_unlock_irq(&pcpu_lock); ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); cond_resched(); spin_lock_irq(&pcpu_lock); if (!ret) { nr_to_pop -= nr; pcpu_chunk_populated(chunk, rs, rs + nr); } else { nr_to_pop = 0; } if (!nr_to_pop) break; } } if (nr_to_pop) { /* ran out of chunks to populate, create a new one and retry */ spin_unlock_irq(&pcpu_lock); chunk = pcpu_create_chunk(gfp); cond_resched(); spin_lock_irq(&pcpu_lock); if (chunk) { pcpu_chunk_relocate(chunk, -1); goto retry_pop; } } } /** * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages * * Scan over chunks in the depopulate list and try to release unused populated * pages back to the system. Depopulated chunks are sidelined to prevent * repopulating these pages unless required. Fully free chunks are reintegrated * and freed accordingly (1 is kept around). 
If we drop below the empty * populated pages threshold, reintegrate the chunk if it has empty free pages. * Each chunk is scanned in the reverse order to keep populated pages close to * the beginning of the chunk. * * CONTEXT: * pcpu_lock (can be dropped temporarily) * */ static void pcpu_reclaim_populated(void) { struct pcpu_chunk *chunk; struct pcpu_block_md *block; int freed_page_start, freed_page_end; int i, end; bool reintegrate; lockdep_assert_held(&pcpu_lock); /* * Once a chunk is isolated to the to_depopulate list, the chunk is no * longer discoverable to allocations whom may populate pages. The only * other accessor is the free path which only returns area back to the * allocator not touching the populated bitmap. */ while ((chunk = list_first_entry_or_null( &pcpu_chunk_lists[pcpu_to_depopulate_slot], struct pcpu_chunk, list))) { WARN_ON(chunk->immutable); /* * Scan chunk's pages in the reverse order to keep populated * pages close to the beginning of the chunk. */ freed_page_start = chunk->nr_pages; freed_page_end = 0; reintegrate = false; for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { /* no more work to do */ if (chunk->nr_empty_pop_pages == 0) break; /* reintegrate chunk to prevent atomic alloc failures */ if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { reintegrate = true; break; } /* * If the page is empty and populated, start or * extend the (i, end) range. If i == 0, decrease * i and perform the depopulation to cover the last * (first) page in the chunk. */ block = chunk->md_blocks + i; if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS && test_bit(i, chunk->populated)) { if (end == -1) end = i; if (i > 0) continue; i--; } /* depopulate if there is an active range */ if (end == -1) continue; spin_unlock_irq(&pcpu_lock); pcpu_depopulate_chunk(chunk, i + 1, end + 1); cond_resched(); spin_lock_irq(&pcpu_lock); pcpu_chunk_depopulated(chunk, i + 1, end + 1); freed_page_start = min(freed_page_start, i + 1); freed_page_end = max(freed_page_end, end + 1); /* reset the range and continue */ end = -1; } /* batch tlb flush per chunk to amortize cost */ if (freed_page_start < freed_page_end) { spin_unlock_irq(&pcpu_lock); pcpu_post_unmap_tlb_flush(chunk, freed_page_start, freed_page_end); cond_resched(); spin_lock_irq(&pcpu_lock); } if (reintegrate || chunk->free_bytes == pcpu_unit_size) pcpu_reintegrate_chunk(chunk); else list_move_tail(&chunk->list, &pcpu_chunk_lists[pcpu_sidelined_slot]); } } /** * pcpu_balance_workfn - manage the amount of free chunks and populated pages * @work: unused * * For each chunk type, manage the number of fully free chunks and the number of * populated pages. An important thing to consider is when pages are freed and * how they contribute to the global counts. */ static void pcpu_balance_workfn(struct work_struct *work) { /* * pcpu_balance_free() is called twice because the first time we may * trim pages in the active pcpu_nr_empty_pop_pages which may cause us * to grow other chunks. This then gives pcpu_reclaim_populated() time * to move fully free chunks to the active list to be freed if * appropriate. */ mutex_lock(&pcpu_alloc_mutex); spin_lock_irq(&pcpu_lock); pcpu_balance_free(false); pcpu_reclaim_populated(); pcpu_balance_populated(); pcpu_balance_free(true); spin_unlock_irq(&pcpu_lock); mutex_unlock(&pcpu_alloc_mutex); } /** * free_percpu - free percpu area * @ptr: pointer to area to free * * Free percpu area @ptr. * * CONTEXT: * Can be called from atomic context. 
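 *
 * Typical usage pairs this with the allocation side, e.g. (an illustrative
 * fragment):
 *
 *	unsigned long __percpu *cnt;
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	cnt = alloc_percpu(unsigned long);	// returned memory is zeroed
 *	if (!cnt)
 *		return -ENOMEM;
 *	this_cpu_inc(*cnt);			// lockless fast path
 *	for_each_possible_cpu(cpu)		// occasional slow path: fold
 *		total += *per_cpu_ptr(cnt, cpu);
 *	free_percpu(cnt);
 *
 * The pointer passed in must have been returned by the percpu allocator
 * (or be NULL, which is a no-op).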
*/ void free_percpu(void __percpu *ptr) { void *addr; struct pcpu_chunk *chunk; unsigned long flags; int size, off; bool need_balance = false; if (!ptr) return; kmemleak_free_percpu(ptr); addr = __pcpu_ptr_to_addr(ptr); chunk = pcpu_chunk_addr_search(addr); off = addr - chunk->base_addr; spin_lock_irqsave(&pcpu_lock, flags); size = pcpu_free_area(chunk, off); pcpu_alloc_tag_free_hook(chunk, off, size); pcpu_memcg_free_hook(chunk, off, size); /* * If there are more than one fully free chunks, wake up grim reaper. * If the chunk is isolated, it may be in the process of being * reclaimed. Let reclaim manage cleaning up of that chunk. */ if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { struct pcpu_chunk *pos; list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) if (pos != chunk) { need_balance = true; break; } } else if (pcpu_should_reclaim_chunk(chunk)) { pcpu_isolate_chunk(chunk); need_balance = true; } trace_percpu_free_percpu(chunk->base_addr, off, ptr); spin_unlock_irqrestore(&pcpu_lock, flags); if (need_balance) pcpu_schedule_balance_work(); } EXPORT_SYMBOL_GPL(free_percpu); bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) { #ifdef CONFIG_SMP const size_t static_size = __per_cpu_end - __per_cpu_start; void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); unsigned int cpu; for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); void *va = (void *)addr; if (va >= start && va < start + static_size) { if (can_addr) { *can_addr = (unsigned long) (va - start); *can_addr += (unsigned long) per_cpu_ptr(base, get_boot_cpu_id()); } return true; } } #endif /* on UP, can't distinguish from other static vars, always false */ return false; } /** * is_kernel_percpu_address - test whether address is from static percpu area * @addr: address to test * * Test whether @addr belongs to in-kernel static percpu area. Module * static percpu areas are not considered. For those, use * is_module_percpu_address(). * * RETURNS: * %true if @addr is from in-kernel static percpu area, %false otherwise. */ bool is_kernel_percpu_address(unsigned long addr) { return __is_kernel_percpu_address(addr, NULL); } /** * per_cpu_ptr_to_phys - convert translated percpu address to physical address * @addr: the address to be converted to physical address * * Given @addr which is dereferenceable address obtained via one of * percpu access macros, this function translates it into its physical * address. The caller is responsible for ensuring @addr stays valid * until this function finishes. * * percpu allocator has special setup for the first chunk, which currently * supports either embedding in linear address space or vmalloc mapping, * and, from the second one, the backing allocator (currently either vm or * km) provides translation. * * The addr can be translated simply without checking if it falls into the * first chunk. But the current code reflects better how percpu allocator * actually works, and the verification can discover both bugs in percpu * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current * code. * * RETURNS: * The physical address for @addr. */ phys_addr_t per_cpu_ptr_to_phys(void *addr) { void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); bool in_first_chunk = false; unsigned long first_low, first_high; unsigned int cpu; /* * The following test on unit_low/high isn't strictly * necessary but will speed up lookups of addresses which * aren't in the first chunk. * * The address check is against full chunk sizes. 
pcpu_base_addr * points to the beginning of the first chunk including the * static region. Assumes good intent as the first chunk may * not be full (ie. < pcpu_unit_pages in size). */ first_low = (unsigned long)pcpu_base_addr + pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); first_high = (unsigned long)pcpu_base_addr + pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); if ((unsigned long)addr >= first_low && (unsigned long)addr < first_high) { for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); if (addr >= start && addr < start + pcpu_unit_size) { in_first_chunk = true; break; } } } if (in_first_chunk) { if (!is_vmalloc_addr(addr)) return __pa(addr); else return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); } else return page_to_phys(pcpu_addr_to_page(addr)) + offset_in_page(addr); } /** * pcpu_alloc_alloc_info - allocate percpu allocation info * @nr_groups: the number of groups * @nr_units: the number of units * * Allocate ai which is large enough for @nr_groups groups containing * @nr_units units. The returned ai's groups[0].cpu_map points to the * cpu_map array which is long enough for @nr_units and filled with * NR_CPUS. It's the caller's responsibility to initialize cpu_map * pointer of other groups. * * RETURNS: * Pointer to the allocated pcpu_alloc_info on success, NULL on * failure. */ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units) { struct pcpu_alloc_info *ai; size_t base_size, ai_size; void *ptr; int unit; base_size = ALIGN(struct_size(ai, groups, nr_groups), __alignof__(ai->groups[0].cpu_map[0])); ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); if (!ptr) return NULL; ai = ptr; ptr += base_size; ai->groups[0].cpu_map = ptr; for (unit = 0; unit < nr_units; unit++) ai->groups[0].cpu_map[unit] = NR_CPUS; ai->nr_groups = nr_groups; ai->__ai_size = PFN_ALIGN(ai_size); return ai; } /** * pcpu_free_alloc_info - free percpu allocation info * @ai: pcpu_alloc_info to free * * Free @ai which was allocated by pcpu_alloc_alloc_info(). */ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) { memblock_free(ai, ai->__ai_size); } /** * pcpu_dump_alloc_info - print out information about pcpu_alloc_info * @lvl: loglevel * @ai: allocation info to dump * * Print out information about @ai using loglevel @lvl. 
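 *
 * The output looks roughly like the following (sizes are made up; one
 * group of eight CPUs, eight units per 2 MiB allocation):
 *
 *	pcpu-alloc: s196608 r8192 d32768 u262144 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3 4 5 6 7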
*/ static void pcpu_dump_alloc_info(const char *lvl, const struct pcpu_alloc_info *ai) { int group_width = 1, cpu_width = 1, width; char empty_str[] = "--------"; int alloc = 0, alloc_end = 0; int group, v; int upa, apl; /* units per alloc, allocs per line */ v = ai->nr_groups; while (v /= 10) group_width++; v = num_possible_cpus(); while (v /= 10) cpu_width++; empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; upa = ai->alloc_size / ai->unit_size; width = upa * (cpu_width + 1) + group_width + 3; apl = rounddown_pow_of_two(max(60 / width, 1)); printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", lvl, ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); for (group = 0; group < ai->nr_groups; group++) { const struct pcpu_group_info *gi = &ai->groups[group]; int unit = 0, unit_end = 0; BUG_ON(gi->nr_units % upa); for (alloc_end += gi->nr_units / upa; alloc < alloc_end; alloc++) { if (!(alloc % apl)) { pr_cont("\n"); printk("%spcpu-alloc: ", lvl); } pr_cont("[%0*d] ", group_width, group); for (unit_end += upa; unit < unit_end; unit++) if (gi->cpu_map[unit] != NR_CPUS) pr_cont("%0*d ", cpu_width, gi->cpu_map[unit]); else pr_cont("%s ", empty_str); } } pr_cont("\n"); } /** * pcpu_setup_first_chunk - initialize the first percpu chunk * @ai: pcpu_alloc_info describing how to percpu area is shaped * @base_addr: mapped address * * Initialize the first percpu chunk which contains the kernel static * percpu area. This function is to be called from arch percpu area * setup path. * * @ai contains all information necessary to initialize the first * chunk and prime the dynamic percpu allocator. * * @ai->static_size is the size of static percpu area. * * @ai->reserved_size, if non-zero, specifies the amount of bytes to * reserve after the static area in the first chunk. This reserves * the first chunk such that it's available only through reserved * percpu allocation. This is primarily used to serve module percpu * static areas on architectures where the addressing model has * limited offset range for symbol relocations to guarantee module * percpu symbols fall inside the relocatable range. * * @ai->dyn_size determines the number of bytes available for dynamic * allocation in the first chunk. The area between @ai->static_size + * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. * * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE * and equal to or larger than @ai->static_size + @ai->reserved_size + * @ai->dyn_size. * * @ai->atom_size is the allocation atom size and used as alignment * for vm areas. * * @ai->alloc_size is the allocation size and always multiple of * @ai->atom_size. This is larger than @ai->atom_size if * @ai->unit_size is larger than @ai->atom_size. * * @ai->nr_groups and @ai->groups describe virtual memory layout of * percpu areas. Units which should be colocated are put into the * same group. Dynamic VM areas will be allocated according to these * groupings. If @ai->nr_groups is zero, a single group containing * all units is assumed. * * The caller should have mapped the first chunk at @base_addr and * copied static data to each unit. * * The first chunk will always contain a static and a dynamic region. * However, the static region is not managed by any chunk. If the first * chunk also contains a reserved region, it is served by two chunks - * one for the reserved region and one for the dynamic region. 
They * share the same vm, but use offset regions in the area allocation map. * The chunk serving the dynamic region is circulated in the chunk slots * and available for dynamic allocation like any other chunk. */ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr) { size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; size_t static_size, dyn_size; unsigned long *group_offsets; size_t *group_sizes; unsigned long *unit_off; unsigned int cpu; int *unit_map; int group, unit, i; unsigned long tmp_addr; size_t alloc_size; #define PCPU_SETUP_BUG_ON(cond) do { \ if (unlikely(cond)) { \ pr_emerg("failed to initialize, %s\n", #cond); \ pr_emerg("cpu_possible_mask=%*pb\n", \ cpumask_pr_args(cpu_possible_mask)); \ pcpu_dump_alloc_info(KERN_EMERG, ai); \ BUG(); \ } \ } while (0) /* sanity checks */ PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); #ifdef CONFIG_SMP PCPU_SETUP_BUG_ON(!ai->static_size); PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); #endif PCPU_SETUP_BUG_ON(!base_addr); PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* process group information and build config tables accordingly */ alloc_size = ai->nr_groups * sizeof(group_offsets[0]); group_offsets = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = ai->nr_groups * sizeof(group_sizes[0]); group_sizes = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = nr_cpu_ids * sizeof(unit_map[0]); unit_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = nr_cpu_ids * sizeof(unit_off[0]); unit_off = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; pcpu_low_unit_cpu = NR_CPUS; pcpu_high_unit_cpu = NR_CPUS; for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { const struct pcpu_group_info *gi = &ai->groups[group]; group_offsets[group] = gi->base_offset; group_sizes[group] = gi->nr_units * ai->unit_size; for (i = 0; i < gi->nr_units; i++) { cpu = gi->cpu_map[i]; if (cpu == NR_CPUS) continue; PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); unit_map[cpu] = unit + i; unit_off[cpu] = gi->base_offset + i * ai->unit_size; /* determine low/high unit_cpu */ if (pcpu_low_unit_cpu == NR_CPUS || unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) pcpu_low_unit_cpu = cpu; if (pcpu_high_unit_cpu == NR_CPUS || unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) pcpu_high_unit_cpu = cpu; } } pcpu_nr_units = unit; for_each_possible_cpu(cpu) PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); /* we're done parsing the input, undefine BUG macro and dump config */ #undef PCPU_SETUP_BUG_ON pcpu_dump_alloc_info(KERN_DEBUG, ai); pcpu_nr_groups = ai->nr_groups; pcpu_group_offsets = group_offsets; pcpu_group_sizes = group_sizes; pcpu_unit_map = unit_map; pcpu_unit_offsets = unit_off; /* determine basic parameters */ pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; pcpu_atom_size = 
ai->atom_size; pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated, BITS_TO_LONGS(pcpu_unit_pages)); pcpu_stats_save_ai(ai); /* * Allocate chunk slots. The slots after the active slots are: * sidelined_slot - isolated, depopulated chunks * free_slot - fully free chunks * to_depopulate_slot - isolated, chunks to depopulate */ pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1; pcpu_free_slot = pcpu_sidelined_slot + 1; pcpu_to_depopulate_slot = pcpu_free_slot + 1; pcpu_nr_slots = pcpu_to_depopulate_slot + 1; pcpu_chunk_lists = memblock_alloc_or_panic(pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]), SMP_CACHE_BYTES); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_chunk_lists[i]); /* * The end of the static region needs to be aligned with the * minimum allocation size as this offsets the reserved and * dynamic region. The first chunk ends page aligned by * expanding the dynamic region, therefore the dynamic region * can be shrunk to compensate while still staying above the * configured sizes. */ static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); dyn_size = ai->dyn_size - (static_size - ai->static_size); /* * Initialize first chunk: * This chunk is broken up into 3 parts: * < static | [reserved] | dynamic > * - static - there is no backing chunk because these allocations can * never be freed. * - reserved (pcpu_reserved_chunk) - exists primarily to serve * allocations from module load. * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first * chunk. */ tmp_addr = (unsigned long)base_addr + static_size; if (ai->reserved_size) pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr, ai->reserved_size); tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size; pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size); pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; pcpu_chunk_relocate(pcpu_first_chunk, -1); /* include all regions of the first chunk */ pcpu_nr_populated += PFN_DOWN(size_sum); pcpu_stats_chunk_alloc(); trace_percpu_create_chunk(base_addr); /* we're done */ pcpu_base_addr = base_addr; } #ifdef CONFIG_SMP const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { [PCPU_FC_AUTO] = "auto", [PCPU_FC_EMBED] = "embed", [PCPU_FC_PAGE] = "page", }; enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; static int __init percpu_alloc_setup(char *str) { if (!str) return -EINVAL; if (0) /* nada */; #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK else if (!strcmp(str, "embed")) pcpu_chosen_fc = PCPU_FC_EMBED; #endif #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK else if (!strcmp(str, "page")) pcpu_chosen_fc = PCPU_FC_PAGE; #endif else pr_warn("unknown allocator %s specified\n", str); return 0; } early_param("percpu_alloc", percpu_alloc_setup); /* * pcpu_embed_first_chunk() is used by the generic percpu setup. * Build it if needed by the arch config or the generic setup is going * to be used. 
*/ #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) #define BUILD_EMBED_FIRST_CHUNK #endif /* build pcpu_page_first_chunk() iff needed by the arch config */ #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) #define BUILD_PAGE_FIRST_CHUNK #endif /* pcpu_build_alloc_info() is used by both embed and page first chunk */ #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) /** * pcpu_build_alloc_info - build alloc_info considering distances between CPUs * @reserved_size: the size of reserved percpu area in bytes * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * * This function determines grouping of units, their mappings to cpus * and other parameters considering needed percpu size, allocation * atom size and distances between CPUs. * * Groups are always multiples of atom size and CPUs which are of * LOCAL_DISTANCE both ways are grouped together and share space for * units in the same group. The returned configuration is guaranteed * to have CPUs on different nodes on different groups and >=75% usage * of allocated virtual address space. * * RETURNS: * On success, pointer to the new allocation_info is returned. On * failure, ERR_PTR value is returned. */ static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn) { static int group_map[NR_CPUS] __initdata; static int group_cnt[NR_CPUS] __initdata; static struct cpumask mask __initdata; const size_t static_size = __per_cpu_end - __per_cpu_start; int nr_groups = 1, nr_units = 0; size_t size_sum, min_unit_size, alloc_size; int upa, max_upa, best_upa; /* units_per_alloc */ int last_allocs, group, unit; unsigned int cpu, tcpu; struct pcpu_alloc_info *ai; unsigned int *cpu_map; /* this function may be called multiple times */ memset(group_map, 0, sizeof(group_map)); memset(group_cnt, 0, sizeof(group_cnt)); cpumask_clear(&mask); /* calculate size_sum and ensure dyn_size is enough for early alloc */ size_sum = PFN_ALIGN(static_size + reserved_size + max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); dyn_size = size_sum - static_size - reserved_size; /* * Determine min_unit_size, alloc_size and max_upa such that * alloc_size is multiple of atom_size and is the smallest * which can accommodate 4k aligned segments which are equal to * or larger than min_unit_size. */ min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); /* determine the maximum # of units that can fit in an allocation */ alloc_size = roundup(min_unit_size, atom_size); upa = alloc_size / min_unit_size; while (alloc_size % upa || (offset_in_page(alloc_size / upa))) upa--; max_upa = upa; cpumask_copy(&mask, cpu_possible_mask); /* group cpus according to their proximity */ for (group = 0; !cpumask_empty(&mask); group++) { /* pop the group's first cpu */ cpu = cpumask_first(&mask); group_map[cpu] = group; group_cnt[group]++; cpumask_clear_cpu(cpu, &mask); for_each_cpu(tcpu, &mask) { if (!cpu_distance_fn || (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { group_map[tcpu] = group; group_cnt[group]++; cpumask_clear_cpu(tcpu, &mask); } } } nr_groups = group; /* * Wasted space is caused by a ratio imbalance of upa to group_cnt. * Expand the unit_size until we use >= 75% of the units allocated. 
* Related to atom_size, which could be much larger than the unit_size. */ last_allocs = INT_MAX; best_upa = 0; for (upa = max_upa; upa; upa--) { int allocs = 0, wasted = 0; if (alloc_size % upa || (offset_in_page(alloc_size / upa))) continue; for (group = 0; group < nr_groups; group++) { int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); allocs += this_allocs; wasted += this_allocs * upa - group_cnt[group]; } /* * Don't accept if wastage is over 1/3. The * greater-than comparison ensures upa==1 always * passes the following check. */ if (wasted > num_possible_cpus() / 3) continue; /* and then don't consume more memory */ if (allocs > last_allocs) break; last_allocs = allocs; best_upa = upa; } BUG_ON(!best_upa); upa = best_upa; /* allocate and fill alloc_info */ for (group = 0; group < nr_groups; group++) nr_units += roundup(group_cnt[group], upa); ai = pcpu_alloc_alloc_info(nr_groups, nr_units); if (!ai) return ERR_PTR(-ENOMEM); cpu_map = ai->groups[0].cpu_map; for (group = 0; group < nr_groups; group++) { ai->groups[group].cpu_map = cpu_map; cpu_map += roundup(group_cnt[group], upa); } ai->static_size = static_size; ai->reserved_size = reserved_size; ai->dyn_size = dyn_size; ai->unit_size = alloc_size / upa; ai->atom_size = atom_size; ai->alloc_size = alloc_size; for (group = 0, unit = 0; group < nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; /* * Initialize base_offset as if all groups are located * back-to-back. The caller should update this to * reflect actual allocation. */ gi->base_offset = unit * ai->unit_size; for_each_possible_cpu(cpu) if (group_map[cpu] == group) gi->cpu_map[gi->nr_units++] = cpu; gi->nr_units = roundup(gi->nr_units, upa); unit += gi->nr_units; } BUG_ON(unit != nr_units); return ai; } static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) { const unsigned long goal = __pa(MAX_DMA_ADDRESS); #ifdef CONFIG_NUMA int node = NUMA_NO_NODE; void *ptr; if (cpu_to_nd_fn) node = cpu_to_nd_fn(cpu); if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) { ptr = memblock_alloc_from(size, align, goal); pr_info("cpu %d has no node %d or node-local memory\n", cpu, node); pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n", cpu, size, (u64)__pa(ptr)); } else { ptr = memblock_alloc_try_nid(size, align, goal, MEMBLOCK_ALLOC_ACCESSIBLE, node); pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n", cpu, size, node, (u64)__pa(ptr)); } return ptr; #else return memblock_alloc_from(size, align, goal); #endif } static void __init pcpu_fc_free(void *ptr, size_t size) { memblock_free(ptr, size); } #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ #if defined(BUILD_EMBED_FIRST_CHUNK) /** * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem * @reserved_size: the size of reserved percpu area in bytes * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * @cpu_to_nd_fn: callback to convert cpu to it's node, optional * * This is a helper to ease setting up embedded first percpu chunk and * can be called where pcpu_setup_first_chunk() is expected. * * If this function is used to setup the first chunk, it is allocated * by calling pcpu_fc_alloc and used as-is without being mapped into * vmalloc area. Allocations are always whole multiples of @atom_size * aligned to @atom_size. 
* * This enables the first chunk to piggy back on the linear physical * mapping which often uses larger page size. Please note that this * can result in very sparse cpu->unit mapping on NUMA machines thus * requiring large vmalloc address space. Don't use this allocator if * vmalloc space is not orders of magnitude larger than distances * between node memory addresses (ie. 32bit NUMA machines). * * @dyn_size specifies the minimum dynamic area size. * * If the needed size is smaller than the minimum or specified unit * size, the leftover is returned using pcpu_fc_free. * * RETURNS: * 0 on success, -errno on failure. */ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) { void *base = (void *)ULONG_MAX; void **areas = NULL; struct pcpu_alloc_info *ai; size_t size_sum, areas_size; unsigned long max_distance; int group, i, highest_group, rc = 0; ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, cpu_distance_fn); if (IS_ERR(ai)) return PTR_ERR(ai); size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); if (!areas) { rc = -ENOMEM; goto out_free; } /* allocate, copy and determine base address & max_distance */ highest_group = 0; for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; unsigned int cpu = NR_CPUS; void *ptr; for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) cpu = gi->cpu_map[i]; BUG_ON(cpu == NR_CPUS); /* allocate space for the whole group */ ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn); if (!ptr) { rc = -ENOMEM; goto out_free_areas; } /* kmemleak tracks the percpu allocations separately */ kmemleak_ignore_phys(__pa(ptr)); areas[group] = ptr; base = min(ptr, base); if (ptr > areas[highest_group]) highest_group = group; } max_distance = areas[highest_group] - base; max_distance += ai->unit_size * ai->groups[highest_group].nr_units; /* warn if maximum distance is further than 75% of vmalloc space */ if (max_distance > VMALLOC_TOTAL * 3 / 4) { pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", max_distance, VMALLOC_TOTAL); #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK /* and fail if we have fallback */ rc = -EINVAL; goto out_free_areas; #endif } /* * Copy data and free unused parts. This should happen after all * allocations are complete; otherwise, we may end up with * overlapping groups. 
*/ for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; void *ptr = areas[group]; for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { if (gi->cpu_map[i] == NR_CPUS) { /* unused unit, free whole */ pcpu_fc_free(ptr, ai->unit_size); continue; } /* copy and return the unused part */ memcpy(ptr, __per_cpu_load, ai->static_size); pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum); } } /* base address is now known, determine group base offsets */ for (group = 0; group < ai->nr_groups; group++) { ai->groups[group].base_offset = areas[group] - base; } pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size); pcpu_setup_first_chunk(ai, base); goto out_free; out_free_areas: for (group = 0; group < ai->nr_groups; group++) if (areas[group]) pcpu_fc_free(areas[group], ai->groups[group].nr_units * ai->unit_size); out_free: pcpu_free_alloc_info(ai); if (areas) memblock_free(areas, areas_size); return rc; } #endif /* BUILD_EMBED_FIRST_CHUNK */ #ifdef BUILD_PAGE_FIRST_CHUNK #include <asm/pgalloc.h> #ifndef P4D_TABLE_SIZE #define P4D_TABLE_SIZE PAGE_SIZE #endif #ifndef PUD_TABLE_SIZE #define PUD_TABLE_SIZE PAGE_SIZE #endif #ifndef PMD_TABLE_SIZE #define PMD_TABLE_SIZE PAGE_SIZE #endif #ifndef PTE_TABLE_SIZE #define PTE_TABLE_SIZE PAGE_SIZE #endif void __init __weak pcpu_populate_pte(unsigned long addr) { pgd_t *pgd = pgd_offset_k(addr); p4d_t *p4d; pud_t *pud; pmd_t *pmd; if (pgd_none(*pgd)) { p4d = memblock_alloc_or_panic(P4D_TABLE_SIZE, P4D_TABLE_SIZE); pgd_populate(&init_mm, pgd, p4d); } p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) { pud = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE); p4d_populate(&init_mm, p4d, pud); } pud = pud_offset(p4d, addr); if (pud_none(*pud)) { pmd = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE); pud_populate(&init_mm, pud, pmd); } pmd = pmd_offset(pud, addr); if (!pmd_present(*pmd)) { pte_t *new; new = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmd, new); } return; } /** * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages * @reserved_size: the size of reserved percpu area in bytes * @cpu_to_nd_fn: callback to convert cpu to it's node, optional * * This is a helper to ease setting up page-remapped first percpu * chunk and can be called where pcpu_setup_first_chunk() is expected. * * This is the basic allocator. Static percpu area is allocated * page-by-page into vmalloc area. * * RETURNS: * 0 on success, -errno on failure. 
*/ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) { static struct vm_struct vm; struct pcpu_alloc_info *ai; char psize_str[16]; int unit_pages; size_t pages_size; struct page **pages; int unit, i, j, rc = 0; int upa; int nr_g0_units; snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); if (IS_ERR(ai)) return PTR_ERR(ai); BUG_ON(ai->nr_groups != 1); upa = ai->alloc_size/ai->unit_size; nr_g0_units = roundup(num_possible_cpus(), upa); if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { pcpu_free_alloc_info(ai); return -EINVAL; } unit_pages = ai->unit_size >> PAGE_SHIFT; /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); pages = memblock_alloc_or_panic(pages_size, SMP_CACHE_BYTES); /* allocate pages */ j = 0; for (unit = 0; unit < num_possible_cpus(); unit++) { unsigned int cpu = ai->groups[0].cpu_map[unit]; for (i = 0; i < unit_pages; i++) { void *ptr; ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn); if (!ptr) { pr_warn("failed to allocate %s page for cpu%u\n", psize_str, cpu); goto enomem; } /* kmemleak tracks the percpu allocations separately */ kmemleak_ignore_phys(__pa(ptr)); pages[j++] = virt_to_page(ptr); } } /* allocate vm area, map the pages and copy static data */ vm.flags = VM_ALLOC; vm.size = num_possible_cpus() * ai->unit_size; vm_area_register_early(&vm, PAGE_SIZE); for (unit = 0; unit < num_possible_cpus(); unit++) { unsigned long unit_addr = (unsigned long)vm.addr + unit * ai->unit_size; for (i = 0; i < unit_pages; i++) pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT)); /* pte already populated, the following shouldn't fail */ rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], unit_pages); if (rc < 0) panic("failed to map percpu area, err=%d\n", rc); flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size); /* copy static data */ memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); } /* we're ready, commit */ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", unit_pages, psize_str, ai->static_size, ai->reserved_size, ai->dyn_size); pcpu_setup_first_chunk(ai, vm.addr); goto out_free_ar; enomem: while (--j >= 0) pcpu_fc_free(page_address(pages[j]), PAGE_SIZE); rc = -ENOMEM; out_free_ar: memblock_free(pages, pages_size); pcpu_free_alloc_info(ai); return rc; } #endif /* BUILD_PAGE_FIRST_CHUNK */ #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA /* * Generic SMP percpu area setup. * * The embedding helper is used because its behavior closely resembles * the original non-dynamic generic percpu area setup. This is * important because many archs have addressing restrictions and might * fail if the percpu area is located far away from the previous * location. As an added bonus, in non-NUMA cases, embedding is * generally a good idea TLB-wise because percpu area can piggy back * on the physical linear memory mapping which uses large page * mappings on applicable archs. */ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); void __init setup_per_cpu_areas(void) { unsigned long delta; unsigned int cpu; int rc; /* * Always reserve area for module percpu variables. That's * what the legacy allocator did. 
*/ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, NULL); if (rc < 0) panic("Failed to initialize percpu areas."); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; } #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ #else /* CONFIG_SMP */ /* * UP percpu area setup. * * UP always uses km-based percpu allocator with identity mapping. * Static percpu variables are indistinguishable from the usual static * variables and don't require any special preparation. */ void __init setup_per_cpu_areas(void) { const size_t unit_size = roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, PERCPU_DYNAMIC_RESERVE)); struct pcpu_alloc_info *ai; void *fc; ai = pcpu_alloc_alloc_info(1, 1); fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); /* kmemleak tracks the percpu allocations separately */ kmemleak_ignore_phys(__pa(fc)); ai->dyn_size = unit_size; ai->unit_size = unit_size; ai->atom_size = unit_size; ai->alloc_size = unit_size; ai->groups[0].nr_units = 1; ai->groups[0].cpu_map[0] = 0; pcpu_setup_first_chunk(ai, fc); pcpu_free_alloc_info(ai); } #endif /* CONFIG_SMP */ /* * pcpu_nr_pages - calculate total number of populated backing pages * * This reflects the number of pages populated to back chunks. Metadata is * excluded in the number exposed in meminfo as the number of backing pages * scales with the number of cpus and can quickly outweigh the memory used for * metadata. It also keeps this calculation nice and simple. * * RETURNS: * Total number of populated backing pages in use by the allocator. */ unsigned long pcpu_nr_pages(void) { return pcpu_nr_populated * pcpu_nr_units; } /* * Percpu allocator is initialized early during boot when neither slab or * workqueue is available. Plug async management until everything is up * and running. */ static int __init percpu_enable_async(void) { pcpu_async_enabled = true; return 0; } subsys_initcall(percpu_enable_async);
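The embed helper above is normally driven from an architecture's own setup_per_cpu_areas() when CONFIG_HAVE_SETUP_PER_CPU_AREA is selected. The following is a hedged, minimal sketch of such a caller, not taken from any particular architecture: pcpu_cpu_distance() and pcpu_cpu_to_node() are hypothetical wrappers around whatever early topology data the arch keeps (cpu_to_node() is assumed to already be usable at this point), and the offset fixup simply mirrors the generic SMP path shown above.

/* hedged sketch of an arch-specific caller of pcpu_embed_first_chunk() */
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	/* assumption: cpu->node mapping is already valid this early */
	return cpu_to_node(from) == cpu_to_node(to) ? LOCAL_DISTANCE
						    : REMOTE_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/* embed the first chunk in the linear mapping, PAGE_SIZE atom */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance, pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* same offset fixup as the generic !CONFIG_HAVE_SETUP_PER_CPU_AREA path */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}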
1 1 1 1 1 5 5 4 3 2 2 2 2 2 2 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 // SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <linux/unaligned.h> /* * Pressure-threshold modules param code from Alex Perry <alex.perry@ieee.org> */ MODULE_AUTHOR("Josh Myer <josh@joshisanerd.com>"); MODULE_DESCRIPTION("USB KB Gear JamStudio Tablet driver"); MODULE_LICENSE("GPL"); #define USB_VENDOR_ID_KBGEAR 0x084e static int kb_pressure_click = 0x10; module_param(kb_pressure_click, int, 0); MODULE_PARM_DESC(kb_pressure_click, "pressure threshold for clicks"); struct kbtab { unsigned char *data; dma_addr_t data_dma; struct input_dev *dev; struct usb_interface *intf; struct urb *irq; char phys[32]; }; static void kbtab_irq(struct urb *urb) { struct kbtab *kbtab = urb->context; unsigned char *data = kbtab->data; struct input_dev *dev = kbtab->dev; int pressure; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&kbtab->intf->dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(&kbtab->intf->dev, "%s - nonzero urb status received: %d\n", __func__, urb->status); goto exit; } input_report_key(dev, BTN_TOOL_PEN, 1); input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1])); input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3])); /*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/ input_report_key(dev, BTN_RIGHT, data[0] & 0x02); pressure = data[5]; if (kb_pressure_click == -1) input_report_abs(dev, ABS_PRESSURE, pressure); else input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ? 
1 : 0); input_sync(dev); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&kbtab->intf->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static const struct usb_device_id kbtab_ids[] = { { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, { } }; MODULE_DEVICE_TABLE(usb, kbtab_ids); static int kbtab_open(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); struct usb_device *udev = interface_to_usbdev(kbtab->intf); kbtab->irq->dev = udev; if (usb_submit_urb(kbtab->irq, GFP_KERNEL)) return -EIO; return 0; } static void kbtab_close(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); usb_kill_urb(kbtab->irq); } static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct kbtab *kbtab; struct input_dev *input_dev; int error = -ENOMEM; if (intf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; endpoint = &intf->cur_altsetting->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; kbtab = kzalloc(sizeof(*kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) goto fail1; kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma); if (!kbtab->data) goto fail1; kbtab->irq = usb_alloc_urb(0, GFP_KERNEL); if (!kbtab->irq) goto fail2; kbtab->intf = intf; kbtab->dev = input_dev; usb_make_path(dev, kbtab->phys, sizeof(kbtab->phys)); strlcat(kbtab->phys, "/input0", sizeof(kbtab->phys)); input_dev->name = "KB Gear Tablet"; input_dev->phys = kbtab->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, kbtab); input_dev->open = kbtab_open; input_dev->close = kbtab_close; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0); usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, kbtab_irq, kbtab, endpoint->bInterval); kbtab->irq->transfer_dma = kbtab->data_dma; kbtab->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(kbtab->dev); if (error) goto fail3; usb_set_intfdata(intf, kbtab); return 0; fail3: usb_free_urb(kbtab->irq); fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma); fail1: input_free_device(input_dev); kfree(kbtab); return error; } static void kbtab_disconnect(struct usb_interface *intf) { struct kbtab *kbtab = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); usb_set_intfdata(intf, NULL); input_unregister_device(kbtab->dev); usb_free_urb(kbtab->irq); usb_free_coherent(udev, 8, kbtab->data, kbtab->data_dma); kfree(kbtab); } static struct usb_driver kbtab_driver = { .name = "kbtab", .probe = kbtab_probe, .disconnect = kbtab_disconnect, .id_table = kbtab_ids, }; module_usb_driver(kbtab_driver);
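For reference, a hedged userspace sketch (not part of the driver) that consumes the events kbtab generates through the evdev interface: BTN_TOOL_PEN/BTN_LEFT/BTN_RIGHT key events plus ABS_X/ABS_Y, and ABS_PRESSURE when the module is loaded with kb_pressure_click=-1. The /dev/input/event0 path is an assumption; the actual event node depends on the system.

/* hedged userspace example: dump events from the kbtab input device */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	/* hypothetical device node; use the node udev assigned to the tablet */
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_ABS && ev.code == ABS_X)
			printf("x=%d\n", ev.value);
		else if (ev.type == EV_ABS && ev.code == ABS_Y)
			printf("y=%d\n", ev.value);
		else if (ev.type == EV_ABS && ev.code == ABS_PRESSURE)
			printf("pressure=%d\n", ev.value);
		else if (ev.type == EV_KEY)
			printf("button %u -> %d\n", ev.code, ev.value);
	}

	close(fd);
	return 0;
}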
// SPDX-License-Identifier: GPL-2.0-only // // core.c -- Voltage/Current Regulator framework. // // Copyright 2007, 2008 Wolfson Microelectronics PLC. // Copyright 2008 SlimLogic Ltd.
// // Author: Liam Girdwood <lrg@slimlogic.co.uk> #include <linux/kernel.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/async.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/reboot.h> #include <linux/regmap.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/consumer.h> #include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/module.h> #define CREATE_TRACE_POINTS #include <trace/events/regulator.h> #include "dummy.h" #include "internal.h" #include "regnl.h" static DEFINE_WW_CLASS(regulator_ww_class); static DEFINE_MUTEX(regulator_nesting_mutex); static DEFINE_MUTEX(regulator_list_mutex); static LIST_HEAD(regulator_map_list); static LIST_HEAD(regulator_ena_gpio_list); static LIST_HEAD(regulator_supply_alias_list); static LIST_HEAD(regulator_coupler_list); static bool has_full_constraints; static struct dentry *debugfs_root; /* * struct regulator_map * * Used to provide symbolic supply names to devices. */ struct regulator_map { struct list_head list; const char *dev_name; /* The dev_name() for the consumer */ const char *supply; struct regulator_dev *regulator; }; /* * struct regulator_enable_gpio * * Management for shared enable GPIO pin */ struct regulator_enable_gpio { struct list_head list; struct gpio_desc *gpiod; u32 enable_count; /* a number of enabled shared GPIO */ u32 request_count; /* a number of requested shared GPIO */ }; /* * struct regulator_supply_alias * * Used to map lookups for a supply onto an alternative device. */ struct regulator_supply_alias { struct list_head list; struct device *src_dev; const char *src_supply; struct device *alias_dev; const char *alias_supply; }; static int _regulator_is_enabled(struct regulator_dev *rdev); static int _regulator_disable(struct regulator *regulator); static int _regulator_get_error_flags(struct regulator_dev *rdev, unsigned int *flags); static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV); static int regulator_balance_voltage(struct regulator_dev *rdev, suspend_state_t state); static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name); static void destroy_regulator(struct regulator *regulator); static void _regulator_put(struct regulator *regulator); const char *rdev_get_name(struct regulator_dev *rdev) { if (rdev->constraints && rdev->constraints->name) return rdev->constraints->name; else if (rdev->desc->name) return rdev->desc->name; else return ""; } EXPORT_SYMBOL_GPL(rdev_get_name); static bool have_full_constraints(void) { return has_full_constraints || of_have_populated_dt(); } static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops) { if (!rdev->constraints) { rdev_err(rdev, "no constraints\n"); return false; } if (rdev->constraints->valid_ops_mask & ops) return true; return false; } /** * regulator_lock_nested - lock a single regulator * @rdev: regulator source * @ww_ctx: w/w mutex acquire context * * This function can be called many times by one task on * a single regulator and its mutex will be locked only 
* once. If a task, which is calling this function is other * than the one, which initially locked the mutex, it will * wait on mutex. * * Return: 0 on success or a negative error number on failure. */ static inline int regulator_lock_nested(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) { bool lock = false; int ret = 0; mutex_lock(&regulator_nesting_mutex); if (!ww_mutex_trylock(&rdev->mutex, ww_ctx)) { if (rdev->mutex_owner == current) rdev->ref_cnt++; else lock = true; if (lock) { mutex_unlock(&regulator_nesting_mutex); ret = ww_mutex_lock(&rdev->mutex, ww_ctx); mutex_lock(&regulator_nesting_mutex); } } else { lock = true; } if (lock && ret != -EDEADLK) { rdev->ref_cnt++; rdev->mutex_owner = current; } mutex_unlock(&regulator_nesting_mutex); return ret; } /** * regulator_lock - lock a single regulator * @rdev: regulator source * * This function can be called many times by one task on * a single regulator and its mutex will be locked only * once. If a task, which is calling this function is other * than the one, which initially locked the mutex, it will * wait on mutex. */ static void regulator_lock(struct regulator_dev *rdev) { regulator_lock_nested(rdev, NULL); } /** * regulator_unlock - unlock a single regulator * @rdev: regulator_source * * This function unlocks the mutex when the * reference counter reaches 0. */ static void regulator_unlock(struct regulator_dev *rdev) { mutex_lock(&regulator_nesting_mutex); if (--rdev->ref_cnt == 0) { rdev->mutex_owner = NULL; ww_mutex_unlock(&rdev->mutex); } WARN_ON_ONCE(rdev->ref_cnt < 0); mutex_unlock(&regulator_nesting_mutex); } /** * regulator_lock_two - lock two regulators * @rdev1: first regulator * @rdev2: second regulator * @ww_ctx: w/w mutex acquire context * * Locks both rdevs using the regulator_ww_class. */ static void regulator_lock_two(struct regulator_dev *rdev1, struct regulator_dev *rdev2, struct ww_acquire_ctx *ww_ctx) { struct regulator_dev *held, *contended; int ret; ww_acquire_init(ww_ctx, &regulator_ww_class); /* Try to just grab both of them */ ret = regulator_lock_nested(rdev1, ww_ctx); WARN_ON(ret); ret = regulator_lock_nested(rdev2, ww_ctx); if (ret != -EDEADLOCK) { WARN_ON(ret); goto exit; } held = rdev1; contended = rdev2; while (true) { regulator_unlock(held); ww_mutex_lock_slow(&contended->mutex, ww_ctx); contended->ref_cnt++; contended->mutex_owner = current; swap(held, contended); ret = regulator_lock_nested(contended, ww_ctx); if (ret != -EDEADLOCK) { WARN_ON(ret); break; } } exit: ww_acquire_done(ww_ctx); } /** * regulator_unlock_two - unlock two regulators * @rdev1: first regulator * @rdev2: second regulator * @ww_ctx: w/w mutex acquire context * * The inverse of regulator_lock_two(). 
*/ static void regulator_unlock_two(struct regulator_dev *rdev1, struct regulator_dev *rdev2, struct ww_acquire_ctx *ww_ctx) { regulator_unlock(rdev2); regulator_unlock(rdev1); ww_acquire_fini(ww_ctx); } static bool regulator_supply_is_couple(struct regulator_dev *rdev) { struct regulator_dev *c_rdev; int i; for (i = 1; i < rdev->coupling_desc.n_coupled; i++) { c_rdev = rdev->coupling_desc.coupled_rdevs[i]; if (rdev->supply->rdev == c_rdev) return true; } return false; } static void regulator_unlock_recursive(struct regulator_dev *rdev, unsigned int n_coupled) { struct regulator_dev *c_rdev, *supply_rdev; int i, supply_n_coupled; for (i = n_coupled; i > 0; i--) { c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1]; if (!c_rdev) continue; if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) { supply_rdev = c_rdev->supply->rdev; supply_n_coupled = supply_rdev->coupling_desc.n_coupled; regulator_unlock_recursive(supply_rdev, supply_n_coupled); } regulator_unlock(c_rdev); } } static int regulator_lock_recursive(struct regulator_dev *rdev, struct regulator_dev **new_contended_rdev, struct regulator_dev **old_contended_rdev, struct ww_acquire_ctx *ww_ctx) { struct regulator_dev *c_rdev; int i, err; for (i = 0; i < rdev->coupling_desc.n_coupled; i++) { c_rdev = rdev->coupling_desc.coupled_rdevs[i]; if (!c_rdev) continue; if (c_rdev != *old_contended_rdev) { err = regulator_lock_nested(c_rdev, ww_ctx); if (err) { if (err == -EDEADLK) { *new_contended_rdev = c_rdev; goto err_unlock; } /* shouldn't happen */ WARN_ON_ONCE(err != -EALREADY); } } else { *old_contended_rdev = NULL; } if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) { err = regulator_lock_recursive(c_rdev->supply->rdev, new_contended_rdev, old_contended_rdev, ww_ctx); if (err) { regulator_unlock(c_rdev); goto err_unlock; } } } return 0; err_unlock: regulator_unlock_recursive(rdev, i); return err; } /** * regulator_unlock_dependent - unlock regulator's suppliers and coupled * regulators * @rdev: regulator source * @ww_ctx: w/w mutex acquire context * * Unlock all regulators related with rdev by coupling or supplying. */ static void regulator_unlock_dependent(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) { regulator_unlock_recursive(rdev, rdev->coupling_desc.n_coupled); ww_acquire_fini(ww_ctx); } /** * regulator_lock_dependent - lock regulator's suppliers and coupled regulators * @rdev: regulator source * @ww_ctx: w/w mutex acquire context * * This function as a wrapper on regulator_lock_recursive(), which locks * all regulators related with rdev by coupling or supplying. 
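 *
 * A minimal sketch of how the core pairs this helper with
 * regulator_unlock_dependent() (illustrative only, not a verbatim
 * call site):
 *
 *	struct ww_acquire_ctx ww_ctx;
 *
 *	regulator_lock_dependent(rdev, &ww_ctx);
 *	... operate on rdev, its supplies and coupled regulators ...
 *	regulator_unlock_dependent(rdev, &ww_ctx);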
*/ static void regulator_lock_dependent(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) { struct regulator_dev *new_contended_rdev = NULL; struct regulator_dev *old_contended_rdev = NULL; int err; mutex_lock(&regulator_list_mutex); ww_acquire_init(ww_ctx, &regulator_ww_class); do { if (new_contended_rdev) { ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx); old_contended_rdev = new_contended_rdev; old_contended_rdev->ref_cnt++; old_contended_rdev->mutex_owner = current; } err = regulator_lock_recursive(rdev, &new_contended_rdev, &old_contended_rdev, ww_ctx); if (old_contended_rdev) regulator_unlock(old_contended_rdev); } while (err == -EDEADLK); ww_acquire_done(ww_ctx); mutex_unlock(&regulator_list_mutex); } /* Platform voltage constraint check */ int regulator_check_voltage(struct regulator_dev *rdev, int *min_uV, int *max_uV) { BUG_ON(*min_uV > *max_uV); if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) { rdev_err(rdev, "voltage operation not allowed\n"); return -EPERM; } if (*max_uV > rdev->constraints->max_uV) *max_uV = rdev->constraints->max_uV; if (*min_uV < rdev->constraints->min_uV) *min_uV = rdev->constraints->min_uV; if (*min_uV > *max_uV) { rdev_err(rdev, "unsupportable voltage range: %d-%duV\n", *min_uV, *max_uV); return -EINVAL; } return 0; } /* return 0 if the state is valid */ static int regulator_check_states(suspend_state_t state) { return (state > PM_SUSPEND_MAX || state == PM_SUSPEND_TO_IDLE); } /* Make sure we select a voltage that suits the needs of all * regulator consumers */ int regulator_check_consumers(struct regulator_dev *rdev, int *min_uV, int *max_uV, suspend_state_t state) { struct regulator *regulator; struct regulator_voltage *voltage; list_for_each_entry(regulator, &rdev->consumer_list, list) { voltage = &regulator->voltage[state]; /* * Assume consumers that didn't say anything are OK * with anything in the constraint range. */ if (!voltage->min_uV && !voltage->max_uV) continue; if (*max_uV > voltage->max_uV) *max_uV = voltage->max_uV; if (*min_uV < voltage->min_uV) *min_uV = voltage->min_uV; } if (*min_uV > *max_uV) { rdev_err(rdev, "Restricting voltage, %u-%uuV\n", *min_uV, *max_uV); return -EINVAL; } return 0; } /* current constraint check */ static int regulator_check_current_limit(struct regulator_dev *rdev, int *min_uA, int *max_uA) { BUG_ON(*min_uA > *max_uA); if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_CURRENT)) { rdev_err(rdev, "current operation not allowed\n"); return -EPERM; } if (*max_uA > rdev->constraints->max_uA && rdev->constraints->max_uA) *max_uA = rdev->constraints->max_uA; if (*min_uA < rdev->constraints->min_uA) *min_uA = rdev->constraints->min_uA; if (*min_uA > *max_uA) { rdev_err(rdev, "unsupportable current range: %d-%duA\n", *min_uA, *max_uA); return -EINVAL; } return 0; } /* operating mode constraint check */ static int regulator_mode_constrain(struct regulator_dev *rdev, unsigned int *mode) { switch (*mode) { case REGULATOR_MODE_FAST: case REGULATOR_MODE_NORMAL: case REGULATOR_MODE_IDLE: case REGULATOR_MODE_STANDBY: break; default: rdev_err(rdev, "invalid mode %x specified\n", *mode); return -EINVAL; } if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_MODE)) { rdev_err(rdev, "mode operation not allowed\n"); return -EPERM; } /* The modes are bitmasks, the most power hungry modes having * the lowest values. If the requested mode isn't supported * try higher modes. 
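	 *
	 * Worked illustration (using the REGULATOR_MODE_* bit values
	 * FAST=0x1, NORMAL=0x2, IDLE=0x4, STANDBY=0x8): a request for
	 * STANDBY (0x8) on a regulator whose valid_modes_mask only
	 * allows NORMAL and FAST is retried as IDLE (0x4), then
	 * NORMAL (0x2), which is accepted. Halving the value therefore
	 * steps towards progressively more power-hungry, and thus safe
	 * to substitute, modes.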
*/ while (*mode) { if (rdev->constraints->valid_modes_mask & *mode) return 0; *mode /= 2; } return -EINVAL; } static inline struct regulator_state * regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state) { if (rdev->constraints == NULL) return NULL; switch (state) { case PM_SUSPEND_STANDBY: return &rdev->constraints->state_standby; case PM_SUSPEND_MEM: return &rdev->constraints->state_mem; case PM_SUSPEND_MAX: return &rdev->constraints->state_disk; default: return NULL; } } static const struct regulator_state * regulator_get_suspend_state_check(struct regulator_dev *rdev, suspend_state_t state) { const struct regulator_state *rstate; rstate = regulator_get_suspend_state(rdev, state); if (rstate == NULL) return NULL; /* If we have no suspend mode configuration don't set anything; * only warn if the driver implements set_suspend_voltage or * set_suspend_mode callback. */ if (rstate->enabled != ENABLE_IN_SUSPEND && rstate->enabled != DISABLE_IN_SUSPEND) { if (rdev->desc->ops->set_suspend_voltage || rdev->desc->ops->set_suspend_mode) rdev_warn(rdev, "No configuration\n"); return NULL; } return rstate; } static ssize_t microvolts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); int uV; regulator_lock(rdev); uV = regulator_get_voltage_rdev(rdev); regulator_unlock(rdev); if (uV < 0) return uV; return sprintf(buf, "%d\n", uV); } static DEVICE_ATTR_RO(microvolts); static ssize_t microamps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev)); } static DEVICE_ATTR_RO(microamps); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", rdev_get_name(rdev)); } static DEVICE_ATTR_RO(name); static const char *regulator_opmode_to_str(int mode) { switch (mode) { case REGULATOR_MODE_FAST: return "fast"; case REGULATOR_MODE_NORMAL: return "normal"; case REGULATOR_MODE_IDLE: return "idle"; case REGULATOR_MODE_STANDBY: return "standby"; } return "unknown"; } static ssize_t regulator_print_opmode(char *buf, int mode) { return sprintf(buf, "%s\n", regulator_opmode_to_str(mode)); } static ssize_t opmode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, _regulator_get_mode(rdev)); } static DEVICE_ATTR_RO(opmode); static ssize_t regulator_print_state(char *buf, int state) { if (state > 0) return sprintf(buf, "enabled\n"); else if (state == 0) return sprintf(buf, "disabled\n"); else return sprintf(buf, "unknown\n"); } static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); ssize_t ret; regulator_lock(rdev); ret = regulator_print_state(buf, _regulator_is_enabled(rdev)); regulator_unlock(rdev); return ret; } static DEVICE_ATTR_RO(state); static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); int status; char *label; status = rdev->desc->ops->get_status(rdev); if (status < 0) return status; switch (status) { case REGULATOR_STATUS_OFF: label = "off"; break; case REGULATOR_STATUS_ON: label = "on"; break; case REGULATOR_STATUS_ERROR: label = "error"; break; case REGULATOR_STATUS_FAST: label = "fast"; break; case 
REGULATOR_STATUS_NORMAL: label = "normal"; break; case REGULATOR_STATUS_IDLE: label = "idle"; break; case REGULATOR_STATUS_STANDBY: label = "standby"; break; case REGULATOR_STATUS_BYPASS: label = "bypass"; break; case REGULATOR_STATUS_UNDEFINED: label = "undefined"; break; default: return -ERANGE; } return sprintf(buf, "%s\n", label); } static DEVICE_ATTR_RO(status); static ssize_t min_microamps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->min_uA); } static DEVICE_ATTR_RO(min_microamps); static ssize_t max_microamps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->max_uA); } static DEVICE_ATTR_RO(max_microamps); static ssize_t min_microvolts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->min_uV); } static DEVICE_ATTR_RO(min_microvolts); static ssize_t max_microvolts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->max_uV); } static DEVICE_ATTR_RO(max_microvolts); static ssize_t requested_microamps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); struct regulator *regulator; int uA = 0; regulator_lock(rdev); list_for_each_entry(regulator, &rdev->consumer_list, list) { if (regulator->enable_count) uA += regulator->uA_load; } regulator_unlock(rdev); return sprintf(buf, "%d\n", uA); } static DEVICE_ATTR_RO(requested_microamps); static ssize_t num_users_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->use_count); } static DEVICE_ATTR_RO(num_users); static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); switch (rdev->desc->type) { case REGULATOR_VOLTAGE: return sprintf(buf, "voltage\n"); case REGULATOR_CURRENT: return sprintf(buf, "current\n"); } return sprintf(buf, "unknown\n"); } static DEVICE_ATTR_RO(type); static ssize_t suspend_mem_microvolts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV); } static DEVICE_ATTR_RO(suspend_mem_microvolts); static ssize_t suspend_disk_microvolts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV); } static DEVICE_ATTR_RO(suspend_disk_microvolts); static ssize_t suspend_standby_microvolts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV); } static DEVICE_ATTR_RO(suspend_standby_microvolts); static ssize_t suspend_mem_mode_show(struct device *dev, struct 
device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, rdev->constraints->state_mem.mode); } static DEVICE_ATTR_RO(suspend_mem_mode); static ssize_t suspend_disk_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, rdev->constraints->state_disk.mode); } static DEVICE_ATTR_RO(suspend_disk_mode); static ssize_t suspend_standby_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, rdev->constraints->state_standby.mode); } static DEVICE_ATTR_RO(suspend_standby_mode); static ssize_t suspend_mem_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_state(buf, rdev->constraints->state_mem.enabled); } static DEVICE_ATTR_RO(suspend_mem_state); static ssize_t suspend_disk_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_state(buf, rdev->constraints->state_disk.enabled); } static DEVICE_ATTR_RO(suspend_disk_state); static ssize_t suspend_standby_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_state(buf, rdev->constraints->state_standby.enabled); } static DEVICE_ATTR_RO(suspend_standby_state); static ssize_t bypass_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); const char *report; bool bypass; int ret; ret = rdev->desc->ops->get_bypass(rdev, &bypass); if (ret != 0) report = "unknown"; else if (bypass) report = "enabled"; else report = "disabled"; return sprintf(buf, "%s\n", report); } static DEVICE_ATTR_RO(bypass); static ssize_t power_budget_milliwatt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->pw_budget_mW); } static DEVICE_ATTR_RO(power_budget_milliwatt); static ssize_t power_requested_milliwatt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->pw_requested_mW); } static DEVICE_ATTR_RO(power_requested_milliwatt); #define REGULATOR_ERROR_ATTR(name, bit) \ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ int ret; \ unsigned int flags; \ struct regulator_dev *rdev = dev_get_drvdata(dev); \ ret = _regulator_get_error_flags(rdev, &flags); \ if (ret) \ return ret; \ return sysfs_emit(buf, "%d\n", !!(flags & (bit))); \ } \ static DEVICE_ATTR_RO(name) REGULATOR_ERROR_ATTR(under_voltage, REGULATOR_ERROR_UNDER_VOLTAGE); REGULATOR_ERROR_ATTR(over_current, REGULATOR_ERROR_OVER_CURRENT); REGULATOR_ERROR_ATTR(regulation_out, REGULATOR_ERROR_REGULATION_OUT); REGULATOR_ERROR_ATTR(fail, REGULATOR_ERROR_FAIL); REGULATOR_ERROR_ATTR(over_temp, REGULATOR_ERROR_OVER_TEMP); REGULATOR_ERROR_ATTR(under_voltage_warn, REGULATOR_ERROR_UNDER_VOLTAGE_WARN); REGULATOR_ERROR_ATTR(over_current_warn, REGULATOR_ERROR_OVER_CURRENT_WARN); REGULATOR_ERROR_ATTR(over_voltage_warn, REGULATOR_ERROR_OVER_VOLTAGE_WARN); REGULATOR_ERROR_ATTR(over_temp_warn, REGULATOR_ERROR_OVER_TEMP_WARN); /* Calculate the new optimum regulator 
operating mode based on the new total * consumer load. All locks held by caller */ static int drms_uA_update(struct regulator_dev *rdev) { struct regulator *sibling; int current_uA = 0, output_uV, input_uV, err; unsigned int mode; /* * first check to see if we can set modes at all, otherwise just * tell the consumer everything is OK. */ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) { rdev_dbg(rdev, "DRMS operation not allowed\n"); return 0; } if (!rdev->desc->ops->get_optimum_mode && !rdev->desc->ops->set_load) return 0; if (!rdev->desc->ops->set_mode && !rdev->desc->ops->set_load) return -EINVAL; /* calc total requested load */ list_for_each_entry(sibling, &rdev->consumer_list, list) { if (sibling->enable_count) current_uA += sibling->uA_load; } current_uA += rdev->constraints->system_load; if (rdev->desc->ops->set_load) { /* set the optimum mode for our new total regulator load */ err = rdev->desc->ops->set_load(rdev, current_uA); if (err < 0) rdev_err(rdev, "failed to set load %d: %pe\n", current_uA, ERR_PTR(err)); } else { /* * Unfortunately in some cases the constraints->valid_ops has * REGULATOR_CHANGE_DRMS but there are no valid modes listed. * That's not really legit but we won't consider it a fatal * error here. We'll treat it as if REGULATOR_CHANGE_DRMS * wasn't set. */ if (!rdev->constraints->valid_modes_mask) { rdev_dbg(rdev, "Can change modes; but no valid mode\n"); return 0; } /* get output voltage */ output_uV = regulator_get_voltage_rdev(rdev); /* * Don't return an error; if regulator driver cares about * output_uV then it's up to the driver to validate. */ if (output_uV <= 0) rdev_dbg(rdev, "invalid output voltage found\n"); /* get input voltage */ input_uV = 0; if (rdev->supply) input_uV = regulator_get_voltage_rdev(rdev->supply->rdev); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; /* * Don't return an error; if regulator driver cares about * input_uV then it's up to the driver to validate. 
*/ if (input_uV <= 0) rdev_dbg(rdev, "invalid input voltage found\n"); /* now get the optimum mode for our new total regulator load */ mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, output_uV, current_uA); /* check the new mode is allowed */ err = regulator_mode_constrain(rdev, &mode); if (err < 0) { rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV: %pe\n", current_uA, input_uV, output_uV, ERR_PTR(err)); return err; } err = rdev->desc->ops->set_mode(rdev, mode); if (err < 0) rdev_err(rdev, "failed to set optimum mode %x: %pe\n", mode, ERR_PTR(err)); } return err; } static int __suspend_set_state(struct regulator_dev *rdev, const struct regulator_state *rstate) { int ret = 0; if (rstate->enabled == ENABLE_IN_SUSPEND && rdev->desc->ops->set_suspend_enable) ret = rdev->desc->ops->set_suspend_enable(rdev); else if (rstate->enabled == DISABLE_IN_SUSPEND && rdev->desc->ops->set_suspend_disable) ret = rdev->desc->ops->set_suspend_disable(rdev); else /* OK if set_suspend_enable or set_suspend_disable is NULL */ ret = 0; if (ret < 0) { rdev_err(rdev, "failed to enabled/disable: %pe\n", ERR_PTR(ret)); return ret; } if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) { ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV); if (ret < 0) { rdev_err(rdev, "failed to set voltage: %pe\n", ERR_PTR(ret)); return ret; } } if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) { ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode); if (ret < 0) { rdev_err(rdev, "failed to set mode: %pe\n", ERR_PTR(ret)); return ret; } } return ret; } static int suspend_set_initial_state(struct regulator_dev *rdev) { const struct regulator_state *rstate; rstate = regulator_get_suspend_state_check(rdev, rdev->constraints->initial_state); if (!rstate) return 0; return __suspend_set_state(rdev, rstate); } #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) static void print_constraints_debug(struct regulator_dev *rdev) { struct regulation_constraints *constraints = rdev->constraints; char buf[160] = ""; size_t len = sizeof(buf) - 1; int count = 0; int ret; if (constraints->min_uV && constraints->max_uV) { if (constraints->min_uV == constraints->max_uV) count += scnprintf(buf + count, len - count, "%d mV ", constraints->min_uV / 1000); else count += scnprintf(buf + count, len - count, "%d <--> %d mV ", constraints->min_uV / 1000, constraints->max_uV / 1000); } if (!constraints->min_uV || constraints->min_uV != constraints->max_uV) { ret = regulator_get_voltage_rdev(rdev); if (ret > 0) count += scnprintf(buf + count, len - count, "at %d mV ", ret / 1000); } if (constraints->uV_offset) count += scnprintf(buf + count, len - count, "%dmV offset ", constraints->uV_offset / 1000); if (constraints->min_uA && constraints->max_uA) { if (constraints->min_uA == constraints->max_uA) count += scnprintf(buf + count, len - count, "%d mA ", constraints->min_uA / 1000); else count += scnprintf(buf + count, len - count, "%d <--> %d mA ", constraints->min_uA / 1000, constraints->max_uA / 1000); } if (!constraints->min_uA || constraints->min_uA != constraints->max_uA) { ret = _regulator_get_current_limit(rdev); if (ret > 0) count += scnprintf(buf + count, len - count, "at %d mA ", ret / 1000); } if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) count += scnprintf(buf + count, len - count, "fast "); if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL) count += scnprintf(buf + count, len - count, "normal "); if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE) count += scnprintf(buf + 
count, len - count, "idle "); if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) count += scnprintf(buf + count, len - count, "standby "); if (constraints->pw_budget_mW) count += scnprintf(buf + count, len - count, "%d mW budget", constraints->pw_budget_mW); if (!count) count = scnprintf(buf, len, "no parameters"); else --count; count += scnprintf(buf + count, len - count, ", %s", _regulator_is_enabled(rdev) ? "enabled" : "disabled"); rdev_dbg(rdev, "%s\n", buf); } #else /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */ static inline void print_constraints_debug(struct regulator_dev *rdev) {} #endif /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */ static void print_constraints(struct regulator_dev *rdev) { struct regulation_constraints *constraints = rdev->constraints; print_constraints_debug(rdev); if ((constraints->min_uV != constraints->max_uV) && !regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) rdev_warn(rdev, "Voltage range but no REGULATOR_CHANGE_VOLTAGE\n"); } static int machine_constraints_voltage(struct regulator_dev *rdev, struct regulation_constraints *constraints) { const struct regulator_ops *ops = rdev->desc->ops; int ret; /* do we need to apply the constraint voltage */ if (rdev->constraints->apply_uV && rdev->constraints->min_uV && rdev->constraints->max_uV) { int target_min, target_max; int current_uV = regulator_get_voltage_rdev(rdev); if (current_uV == -ENOTRECOVERABLE) { /* This regulator can't be read and must be initialized */ rdev_info(rdev, "Setting %d-%duV\n", rdev->constraints->min_uV, rdev->constraints->max_uV); _regulator_do_set_voltage(rdev, rdev->constraints->min_uV, rdev->constraints->max_uV); current_uV = regulator_get_voltage_rdev(rdev); } if (current_uV < 0) { if (current_uV != -EPROBE_DEFER) rdev_err(rdev, "failed to get the current voltage: %pe\n", ERR_PTR(current_uV)); return current_uV; } /* * If we're below the minimum voltage move up to the * minimum voltage, if we're above the maximum voltage * then move down to the maximum. */ target_min = current_uV; target_max = current_uV; if (current_uV < rdev->constraints->min_uV) { target_min = rdev->constraints->min_uV; target_max = rdev->constraints->min_uV; } if (current_uV > rdev->constraints->max_uV) { target_min = rdev->constraints->max_uV; target_max = rdev->constraints->max_uV; } if (target_min != current_uV || target_max != current_uV) { rdev_info(rdev, "Bringing %duV into %d-%duV\n", current_uV, target_min, target_max); ret = _regulator_do_set_voltage( rdev, target_min, target_max); if (ret < 0) { rdev_err(rdev, "failed to apply %d-%duV constraint: %pe\n", target_min, target_max, ERR_PTR(ret)); return ret; } } } /* constrain machine-level voltage specs to fit * the actual range supported by this regulator. */ if (ops->list_voltage && rdev->desc->n_voltages) { int count = rdev->desc->n_voltages; int i; int min_uV = INT_MAX; int max_uV = INT_MIN; int cmin = constraints->min_uV; int cmax = constraints->max_uV; /* it's safe to autoconfigure fixed-voltage supplies * and the constraints are used by list_voltage. 
*/ if (count == 1 && !cmin) { cmin = 1; cmax = INT_MAX; constraints->min_uV = cmin; constraints->max_uV = cmax; } /* voltage constraints are optional */ if ((cmin == 0) && (cmax == 0)) return 0; /* else require explicit machine-level constraints */ if (cmin <= 0 || cmax <= 0 || cmax < cmin) { rdev_err(rdev, "invalid voltage constraints\n"); return -EINVAL; } /* no need to loop voltages if range is continuous */ if (rdev->desc->continuous_voltage_range) return 0; /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */ for (i = 0; i < count; i++) { int value; value = ops->list_voltage(rdev, i); if (value <= 0) continue; /* maybe adjust [min_uV..max_uV] */ if (value >= cmin && value < min_uV) min_uV = value; if (value <= cmax && value > max_uV) max_uV = value; } /* final: [min_uV..max_uV] valid iff constraints valid */ if (max_uV < min_uV) { rdev_err(rdev, "unsupportable voltage constraints %u-%uuV\n", min_uV, max_uV); return -EINVAL; } /* use regulator's subset of machine constraints */ if (constraints->min_uV < min_uV) { rdev_dbg(rdev, "override min_uV, %d -> %d\n", constraints->min_uV, min_uV); constraints->min_uV = min_uV; } if (constraints->max_uV > max_uV) { rdev_dbg(rdev, "override max_uV, %d -> %d\n", constraints->max_uV, max_uV); constraints->max_uV = max_uV; } } return 0; } static int machine_constraints_current(struct regulator_dev *rdev, struct regulation_constraints *constraints) { const struct regulator_ops *ops = rdev->desc->ops; int ret; if (!constraints->min_uA && !constraints->max_uA) return 0; if (constraints->min_uA > constraints->max_uA) { rdev_err(rdev, "Invalid current constraints\n"); return -EINVAL; } if (!ops->set_current_limit || !ops->get_current_limit) { rdev_warn(rdev, "Operation of current configuration missing\n"); return 0; } /* Set regulator current in constraints range */ ret = ops->set_current_limit(rdev, constraints->min_uA, constraints->max_uA); if (ret < 0) { rdev_err(rdev, "Failed to set current constraint, %d\n", ret); return ret; } return 0; } static int _regulator_do_enable(struct regulator_dev *rdev); static int notif_set_limit(struct regulator_dev *rdev, int (*set)(struct regulator_dev *, int, int, bool), int limit, int severity) { bool enable; if (limit == REGULATOR_NOTIF_LIMIT_DISABLE) { enable = false; limit = 0; } else { enable = true; } if (limit == REGULATOR_NOTIF_LIMIT_ENABLE) limit = 0; return set(rdev, limit, severity, enable); } static int handle_notify_limits(struct regulator_dev *rdev, int (*set)(struct regulator_dev *, int, int, bool), struct notification_limit *limits) { int ret = 0; if (!set) return -EOPNOTSUPP; if (limits->prot) ret = notif_set_limit(rdev, set, limits->prot, REGULATOR_SEVERITY_PROT); if (ret) return ret; if (limits->err) ret = notif_set_limit(rdev, set, limits->err, REGULATOR_SEVERITY_ERR); if (ret) return ret; if (limits->warn) ret = notif_set_limit(rdev, set, limits->warn, REGULATOR_SEVERITY_WARN); return ret; } /** * set_machine_constraints - sets regulator constraints * @rdev: regulator source * * Allows platform initialisation code to define and constrain * regulator circuits e.g. valid voltage/current ranges, etc. NOTE: * Constraints *must* be set by platform code in order for some * regulator operations to proceed i.e. set_voltage, set_current_limit, * set_mode. * * Return: 0 on success or a negative error number on failure. 
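 *
 * A minimal sketch of the kind of constraints platform code might pass
 * in (the field values and the "buck1" name are hypothetical; the
 * structure and flags are from the regulator machine API):
 *
 *	static struct regulation_constraints buck1_constraints = {
 *		.min_uV		= 900000,
 *		.max_uV		= 1800000,
 *		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
 *				  REGULATOR_CHANGE_STATUS,
 *		.boot_on	= 1,
 *	};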
*/ static int set_machine_constraints(struct regulator_dev *rdev) { int ret = 0; const struct regulator_ops *ops = rdev->desc->ops; ret = machine_constraints_voltage(rdev, rdev->constraints); if (ret != 0) return ret; ret = machine_constraints_current(rdev, rdev->constraints); if (ret != 0) return ret; if (rdev->constraints->ilim_uA && ops->set_input_current_limit) { ret = ops->set_input_current_limit(rdev, rdev->constraints->ilim_uA); if (ret < 0) { rdev_err(rdev, "failed to set input limit: %pe\n", ERR_PTR(ret)); return ret; } } /* do we need to setup our suspend state */ if (rdev->constraints->initial_state) { ret = suspend_set_initial_state(rdev); if (ret < 0) { rdev_err(rdev, "failed to set suspend state: %pe\n", ERR_PTR(ret)); return ret; } } if (rdev->constraints->initial_mode) { if (!ops->set_mode) { rdev_err(rdev, "no set_mode operation\n"); return -EINVAL; } ret = ops->set_mode(rdev, rdev->constraints->initial_mode); if (ret < 0) { rdev_err(rdev, "failed to set initial mode: %pe\n", ERR_PTR(ret)); return ret; } } else if (rdev->constraints->system_load) { /* * We'll only apply the initial system load if an * initial mode wasn't specified. */ drms_uA_update(rdev); } if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable) && ops->set_ramp_delay) { ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay); if (ret < 0) { rdev_err(rdev, "failed to set ramp_delay: %pe\n", ERR_PTR(ret)); return ret; } } if (rdev->constraints->pull_down && ops->set_pull_down) { ret = ops->set_pull_down(rdev); if (ret < 0) { rdev_err(rdev, "failed to set pull down: %pe\n", ERR_PTR(ret)); return ret; } } if (rdev->constraints->soft_start && ops->set_soft_start) { ret = ops->set_soft_start(rdev); if (ret < 0) { rdev_err(rdev, "failed to set soft start: %pe\n", ERR_PTR(ret)); return ret; } } /* * Existing logic does not warn if over_current_protection is given as * a constraint but driver does not support that. I think we should * warn about this type of issues as it is possible someone changes * PMIC on board to another type - and the another PMIC's driver does * not support setting protection. Board composer may happily believe * the DT limits are respected - especially if the new PMIC HW also * supports protection but the driver does not. I won't change the logic * without hearing more experienced opinion on this though. * * If warning is seen as a good idea then we can merge handling the * over-curret protection and detection and get rid of this special * handling. 
*/ if (rdev->constraints->over_current_protection && ops->set_over_current_protection) { int lim = rdev->constraints->over_curr_limits.prot; ret = ops->set_over_current_protection(rdev, lim, REGULATOR_SEVERITY_PROT, true); if (ret < 0) { rdev_err(rdev, "failed to set over current protection: %pe\n", ERR_PTR(ret)); return ret; } } if (rdev->constraints->over_current_detection) ret = handle_notify_limits(rdev, ops->set_over_current_protection, &rdev->constraints->over_curr_limits); if (ret) { if (ret != -EOPNOTSUPP) { rdev_err(rdev, "failed to set over current limits: %pe\n", ERR_PTR(ret)); return ret; } rdev_warn(rdev, "IC does not support requested over-current limits\n"); } if (rdev->constraints->over_voltage_detection) ret = handle_notify_limits(rdev, ops->set_over_voltage_protection, &rdev->constraints->over_voltage_limits); if (ret) { if (ret != -EOPNOTSUPP) { rdev_err(rdev, "failed to set over voltage limits %pe\n", ERR_PTR(ret)); return ret; } rdev_warn(rdev, "IC does not support requested over voltage limits\n"); } if (rdev->constraints->under_voltage_detection) ret = handle_notify_limits(rdev, ops->set_under_voltage_protection, &rdev->constraints->under_voltage_limits); if (ret) { if (ret != -EOPNOTSUPP) { rdev_err(rdev, "failed to set under voltage limits %pe\n", ERR_PTR(ret)); return ret; } rdev_warn(rdev, "IC does not support requested under voltage limits\n"); } if (rdev->constraints->over_temp_detection) ret = handle_notify_limits(rdev, ops->set_thermal_protection, &rdev->constraints->temp_limits); if (ret) { if (ret != -EOPNOTSUPP) { rdev_err(rdev, "failed to set temperature limits %pe\n", ERR_PTR(ret)); return ret; } rdev_warn(rdev, "IC does not support requested temperature limits\n"); } if (rdev->constraints->active_discharge && ops->set_active_discharge) { bool ad_state = (rdev->constraints->active_discharge == REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false; ret = ops->set_active_discharge(rdev, ad_state); if (ret < 0) { rdev_err(rdev, "failed to set active discharge: %pe\n", ERR_PTR(ret)); return ret; } } /* * If there is no mechanism for controlling the regulator then * flag it as always_on so we don't end up duplicating checks * for this so much. Note that we could control the state of * a supply to control the output on a regulator that has no * direct control. */ if (!rdev->ena_pin && !ops->enable) { if (rdev->supply_name && !rdev->supply) return -EPROBE_DEFER; if (rdev->supply) rdev->constraints->always_on = rdev->supply->rdev->constraints->always_on; else rdev->constraints->always_on = true; } /* If the constraints say the regulator should be on at this point * and we have control then make sure it is enabled. */ if (rdev->constraints->always_on || rdev->constraints->boot_on) { /* If we want to enable this regulator, make sure that we know * the supplying regulator. */ if (rdev->supply_name && !rdev->supply) return -EPROBE_DEFER; /* If supplying regulator has already been enabled, * it's not intended to have use_count increment * when rdev is only boot-on. 
 */
		if (rdev->supply && (rdev->constraints->always_on ||
				     !regulator_is_enabled(rdev->supply))) {
			ret = regulator_enable(rdev->supply);
			if (ret < 0) {
				_regulator_put(rdev->supply);
				rdev->supply = NULL;
				return ret;
			}
		}

		ret = _regulator_do_enable(rdev);
		if (ret < 0 && ret != -EINVAL) {
			rdev_err(rdev, "failed to enable: %pe\n",
				 ERR_PTR(ret));
			return ret;
		}

		if (rdev->constraints->always_on)
			rdev->use_count++;
	} else if (rdev->desc->off_on_delay) {
		rdev->last_off = ktime_get();
	}

	if (!rdev->constraints->pw_budget_mW)
		rdev->constraints->pw_budget_mW = INT_MAX;

	print_constraints(rdev);
	return 0;
}

/**
 * set_supply - set regulator supply regulator
 * @rdev: regulator (locked)
 * @supply_rdev: supply regulator (locked)
 *
 * Called by platform initialisation code to set the supply regulator for this
 * regulator. This ensures that a regulator's supply will also be enabled by
 * the core if its child is enabled.
 *
 * Return: 0 on success or a negative error number on failure.
 */
static int set_supply(struct regulator_dev *rdev,
		      struct regulator_dev *supply_rdev)
{
	int err;

	rdev_dbg(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));

	if (!try_module_get(supply_rdev->owner))
		return -ENODEV;

	rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
	if (rdev->supply == NULL) {
		module_put(supply_rdev->owner);
		err = -ENOMEM;
		return err;
	}
	supply_rdev->open_count++;

	return 0;
}

/**
 * set_consumer_device_supply - Bind a regulator to a symbolic supply
 * @rdev: regulator source
 * @consumer_dev_name: dev_name() string for device supply applies to
 * @supply: symbolic name for supply
 *
 * Allows platform initialisation code to map physical regulator
 * sources to symbolic names for supplies for use by devices. Devices
 * should use these symbolic names to request regulators, avoiding the
 * need to provide board-specific regulator names as platform data.
 *
 * Return: 0 on success or a negative error number on failure.
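 *
 * A minimal sketch of a mapping as board code would usually express it
 * (the "mmc0" device and "vmmc" supply names are hypothetical):
 *
 *	static struct regulator_consumer_supply ldo3_consumers[] = {
 *		REGULATOR_SUPPLY("vmmc", "mmc0"),
 *	};
 *
 * which ends up recorded here as a dev_name()/supply pair so that a
 * later regulator_get() of "vmmc" from that device resolves to @rdev.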
*/ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, const char *supply) { struct regulator_map *node, *new_node; int has_dev; if (supply == NULL) return -EINVAL; if (consumer_dev_name != NULL) has_dev = 1; else has_dev = 0; new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); if (new_node == NULL) return -ENOMEM; new_node->regulator = rdev; new_node->supply = supply; if (has_dev) { new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); if (new_node->dev_name == NULL) { kfree(new_node); return -ENOMEM; } } mutex_lock(&regulator_list_mutex); list_for_each_entry(node, &regulator_map_list, list) { if (node->dev_name && consumer_dev_name) { if (strcmp(node->dev_name, consumer_dev_name) != 0) continue; } else if (node->dev_name || consumer_dev_name) { continue; } if (strcmp(node->supply, supply) != 0) continue; pr_debug("%s: %s/%s is '%s' supply; fail %s/%s\n", consumer_dev_name, dev_name(&node->regulator->dev), node->regulator->desc->name, supply, dev_name(&rdev->dev), rdev_get_name(rdev)); goto fail; } list_add(&new_node->list, &regulator_map_list); mutex_unlock(&regulator_list_mutex); return 0; fail: mutex_unlock(&regulator_list_mutex); kfree(new_node->dev_name); kfree(new_node); return -EBUSY; } static void unset_regulator_supplies(struct regulator_dev *rdev) { struct regulator_map *node, *n; list_for_each_entry_safe(node, n, &regulator_map_list, list) { if (rdev == node->regulator) { list_del(&node->list); kfree(node->dev_name); kfree(node); } } } #ifdef CONFIG_DEBUG_FS static ssize_t constraint_flags_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const struct regulator *regulator = file->private_data; const struct regulation_constraints *c = regulator->rdev->constraints; char *buf; ssize_t ret; if (!c) return 0; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = snprintf(buf, PAGE_SIZE, "always_on: %u\n" "boot_on: %u\n" "apply_uV: %u\n" "ramp_disable: %u\n" "soft_start: %u\n" "pull_down: %u\n" "over_current_protection: %u\n", c->always_on, c->boot_on, c->apply_uV, c->ramp_disable, c->soft_start, c->pull_down, c->over_current_protection); ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } #endif static const struct file_operations constraint_flags_fops = { #ifdef CONFIG_DEBUG_FS .open = simple_open, .read = constraint_flags_read_file, .llseek = default_llseek, #endif }; #define REG_STR_SIZE 64 static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name) { struct regulator *regulator; int err = 0; lockdep_assert_held_once(&rdev->mutex.base); if (dev) { char buf[REG_STR_SIZE]; int size; size = snprintf(buf, REG_STR_SIZE, "%s-%s", dev->kobj.name, supply_name); if (size >= REG_STR_SIZE) return NULL; supply_name = kstrdup(buf, GFP_KERNEL); if (supply_name == NULL) return NULL; } else { supply_name = kstrdup_const(supply_name, GFP_KERNEL); if (supply_name == NULL) return NULL; } regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); if (regulator == NULL) { kfree_const(supply_name); return NULL; } regulator->rdev = rdev; regulator->supply_name = supply_name; list_add(&regulator->list, &rdev->consumer_list); if (dev) { regulator->dev = dev; /* Add a link to the device sysfs entry */ err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj, supply_name); if (err) { rdev_dbg(rdev, "could not add device link %s: %pe\n", dev->kobj.name, ERR_PTR(err)); /* non-fatal */ } } if (err != -EEXIST) { 
regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs); if (IS_ERR(regulator->debugfs)) { rdev_dbg(rdev, "Failed to create debugfs directory\n"); regulator->debugfs = NULL; } } if (regulator->debugfs) { debugfs_create_u32("uA_load", 0444, regulator->debugfs, &regulator->uA_load); debugfs_create_u32("min_uV", 0444, regulator->debugfs, &regulator->voltage[PM_SUSPEND_ON].min_uV); debugfs_create_u32("max_uV", 0444, regulator->debugfs, &regulator->voltage[PM_SUSPEND_ON].max_uV); debugfs_create_file("constraint_flags", 0444, regulator->debugfs, regulator, &constraint_flags_fops); } /* * Check now if the regulator is an always on regulator - if * it is then we don't need to do nearly so much work for * enable/disable calls. */ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS) && _regulator_is_enabled(rdev)) regulator->always_on = true; return regulator; } static int _regulator_get_enable_time(struct regulator_dev *rdev) { if (rdev->constraints && rdev->constraints->enable_time) return rdev->constraints->enable_time; if (rdev->desc->ops->enable_time) return rdev->desc->ops->enable_time(rdev); return rdev->desc->enable_time; } static struct regulator_supply_alias *regulator_find_supply_alias( struct device *dev, const char *supply) { struct regulator_supply_alias *map; list_for_each_entry(map, &regulator_supply_alias_list, list) if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0) return map; return NULL; } static void regulator_supply_alias(struct device **dev, const char **supply) { struct regulator_supply_alias *map; map = regulator_find_supply_alias(*dev, *supply); if (map) { dev_dbg(*dev, "Mapping supply %s to %s,%s\n", *supply, map->alias_supply, dev_name(map->alias_dev)); *dev = map->alias_dev; *supply = map->alias_supply; } } static int regulator_match(struct device *dev, const void *data) { struct regulator_dev *r = dev_to_rdev(dev); return strcmp(rdev_get_name(r), data) == 0; } static struct regulator_dev *regulator_lookup_by_name(const char *name) { struct device *dev; dev = class_find_device(&regulator_class, NULL, name, regulator_match); return dev ? dev_to_rdev(dev) : NULL; } static struct regulator_dev *regulator_dt_lookup(struct device *dev, const char *supply) { struct regulator_dev *r = NULL; if (dev_of_node(dev)) { r = of_regulator_dev_lookup(dev, dev_of_node(dev), supply); if (PTR_ERR(r) == -ENODEV) r = NULL; } return r; } /** * regulator_dev_lookup - lookup a regulator device. * @dev: device for regulator "consumer". * @supply: Supply name or regulator ID. * * Return: pointer to &struct regulator_dev or ERR_PTR() encoded negative error number. * * If successful, returns a struct regulator_dev that corresponds to the name * @supply and with the embedded struct device refcount incremented by one. * The refcount must be dropped by calling put_device(). * On failure one of the following ERR_PTR() encoded values is returned: * -%ENODEV if lookup fails permanently, -%EPROBE_DEFER if lookup could succeed * in the future. 
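 *
 * A minimal usage sketch (illustrative; "vin" is a hypothetical supply
 * name):
 *
 *	r = regulator_dev_lookup(dev, "vin");
 *	if (IS_ERR(r))
 *		return PTR_ERR(r);
 *	... use r ...
 *	put_device(&r->dev);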
*/ static struct regulator_dev *regulator_dev_lookup(struct device *dev, const char *supply) { struct regulator_dev *r = NULL; struct regulator_map *map; const char *devname = NULL; regulator_supply_alias(&dev, &supply); /* first do a dt based lookup */ r = regulator_dt_lookup(dev, supply); if (r) return r; /* if not found, try doing it non-dt way */ if (dev) devname = dev_name(dev); mutex_lock(&regulator_list_mutex); list_for_each_entry(map, &regulator_map_list, list) { /* If the mapping has a device set up it must match */ if (map->dev_name && (!devname || strcmp(map->dev_name, devname))) continue; if (strcmp(map->supply, supply) == 0 && get_device(&map->regulator->dev)) { r = map->regulator; break; } } mutex_unlock(&regulator_list_mutex); if (r) return r; r = regulator_lookup_by_name(supply); if (r) return r; return ERR_PTR(-ENODEV); } static int regulator_resolve_supply(struct regulator_dev *rdev) { struct regulator_dev *r; struct device *dev = rdev->dev.parent; struct ww_acquire_ctx ww_ctx; int ret = 0; /* No supply to resolve? */ if (!rdev->supply_name) return 0; /* Supply already resolved? (fast-path without locking contention) */ if (rdev->supply) return 0; /* first do a dt based lookup on the node described in the virtual * device. */ r = regulator_dt_lookup(&rdev->dev, rdev->supply_name); /* If regulator not found use usual search path in the parent * device. */ if (!r) r = regulator_dev_lookup(dev, rdev->supply_name); if (IS_ERR(r)) { ret = PTR_ERR(r); /* Did the lookup explicitly defer for us? */ if (ret == -EPROBE_DEFER) goto out; if (have_full_constraints()) { r = dummy_regulator_rdev; get_device(&r->dev); } else { dev_err(dev, "Failed to resolve %s-supply for %s\n", rdev->supply_name, rdev->desc->name); ret = -EPROBE_DEFER; goto out; } } if (r == rdev) { dev_err(dev, "Supply for %s (%s) resolved to itself\n", rdev->desc->name, rdev->supply_name); if (!have_full_constraints()) { ret = -EINVAL; goto out; } r = dummy_regulator_rdev; get_device(&r->dev); } /* * If the supply's parent device is not the same as the * regulator's parent device, then ensure the parent device * is bound before we resolve the supply, in case the parent * device get probe deferred and unregisters the supply. */ if (r->dev.parent && r->dev.parent != rdev->dev.parent) { if (!device_is_bound(r->dev.parent)) { put_device(&r->dev); ret = -EPROBE_DEFER; goto out; } } /* Recursively resolve the supply of the supply */ ret = regulator_resolve_supply(r); if (ret < 0) { put_device(&r->dev); goto out; } /* * Recheck rdev->supply with rdev->mutex lock held to avoid a race * between rdev->supply null check and setting rdev->supply in * set_supply() from concurrent tasks. */ regulator_lock_two(rdev, r, &ww_ctx); /* Supply just resolved by a concurrent task? */ if (rdev->supply) { regulator_unlock_two(rdev, r, &ww_ctx); put_device(&r->dev); goto out; } ret = set_supply(rdev, r); if (ret < 0) { regulator_unlock_two(rdev, r, &ww_ctx); put_device(&r->dev); goto out; } regulator_unlock_two(rdev, r, &ww_ctx); /* * In set_machine_constraints() we may have turned this regulator on * but we couldn't propagate to the supply if it hadn't been resolved * yet. Do it now. 
*/ if (rdev->use_count) { ret = regulator_enable(rdev->supply); if (ret < 0) { _regulator_put(rdev->supply); rdev->supply = NULL; goto out; } } out: return ret; } /* common pre-checks for regulator requests */ int _regulator_get_common_check(struct device *dev, const char *id, enum regulator_get_type get_type) { if (get_type >= MAX_GET_TYPE) { dev_err(dev, "invalid type %d in %s\n", get_type, __func__); return -EINVAL; } if (id == NULL) { dev_err(dev, "regulator request with no identifier\n"); return -EINVAL; } return 0; } /** * _regulator_get_common - Common code for regulator requests * @rdev: regulator device pointer as returned by *regulator_dev_lookup() * Its reference count is expected to have been incremented. * @dev: device used for dev_printk messages * @id: Supply name or regulator ID * @get_type: enum regulator_get_type value corresponding to type of request * * Returns: pointer to struct regulator corresponding to @rdev, or ERR_PTR() * encoded error. * * This function should be chained with *regulator_dev_lookup() functions. */ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct device *dev, const char *id, enum regulator_get_type get_type) { struct regulator *regulator; struct device_link *link; int ret; if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); /* * If regulator_dev_lookup() fails with error other * than -ENODEV our job here is done, we simply return it. */ if (ret != -ENODEV) return ERR_PTR(ret); if (!have_full_constraints()) { dev_warn(dev, "incomplete constraints, dummy supplies not allowed (id=%s)\n", id); return ERR_PTR(-ENODEV); } switch (get_type) { case NORMAL_GET: /* * Assume that a regulator is physically present and * enabled, even if it isn't hooked up, and just * provide a dummy. */ dev_warn(dev, "supply %s not found, using dummy regulator\n", id); rdev = dummy_regulator_rdev; get_device(&rdev->dev); break; case EXCLUSIVE_GET: dev_warn(dev, "dummy supplies not allowed for exclusive requests (id=%s)\n", id); fallthrough; default: return ERR_PTR(-ENODEV); } } if (rdev->exclusive) { regulator = ERR_PTR(-EPERM); put_device(&rdev->dev); return regulator; } if (get_type == EXCLUSIVE_GET && rdev->open_count) { regulator = ERR_PTR(-EBUSY); put_device(&rdev->dev); return regulator; } mutex_lock(&regulator_list_mutex); ret = (rdev->coupling_desc.n_resolved != rdev->coupling_desc.n_coupled); mutex_unlock(&regulator_list_mutex); if (ret != 0) { regulator = ERR_PTR(-EPROBE_DEFER); put_device(&rdev->dev); return regulator; } ret = regulator_resolve_supply(rdev); if (ret < 0) { regulator = ERR_PTR(ret); put_device(&rdev->dev); return regulator; } if (!try_module_get(rdev->owner)) { regulator = ERR_PTR(-EPROBE_DEFER); put_device(&rdev->dev); return regulator; } regulator_lock(rdev); regulator = create_regulator(rdev, dev, id); regulator_unlock(rdev); if (regulator == NULL) { regulator = ERR_PTR(-ENOMEM); module_put(rdev->owner); put_device(&rdev->dev); return regulator; } rdev->open_count++; if (get_type == EXCLUSIVE_GET) { rdev->exclusive = 1; ret = _regulator_is_enabled(rdev); if (ret > 0) { rdev->use_count = 1; regulator->enable_count = 1; /* Propagate the regulator state to its supply */ if (rdev->supply) { ret = regulator_enable(rdev->supply); if (ret < 0) { destroy_regulator(regulator); module_put(rdev->owner); put_device(&rdev->dev); return ERR_PTR(ret); } } } else { rdev->use_count = 0; regulator->enable_count = 0; } } link = device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS); if (!IS_ERR_OR_NULL(link)) regulator->device_link = true; return 
regulator; } /* Internal regulator request function */ struct regulator *_regulator_get(struct device *dev, const char *id, enum regulator_get_type get_type) { struct regulator_dev *rdev; int ret; ret = _regulator_get_common_check(dev, id, get_type); if (ret) return ERR_PTR(ret); rdev = regulator_dev_lookup(dev, id); return _regulator_get_common(rdev, dev, id, get_type); } /** * regulator_get - lookup and obtain a reference to a regulator. * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * * Use of supply names configured via set_consumer_device_supply() is * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. * * Return: Pointer to a &struct regulator corresponding to the regulator * producer, or an ERR_PTR() encoded negative error number. */ struct regulator *regulator_get(struct device *dev, const char *id) { return _regulator_get(dev, id, NORMAL_GET); } EXPORT_SYMBOL_GPL(regulator_get); /** * regulator_get_exclusive - obtain exclusive access to a regulator. * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * * Other consumers will be unable to obtain this regulator while this * reference is held and the use count for the regulator will be * initialised to reflect the current state of the regulator. * * This is intended for use by consumers which cannot tolerate shared * use of the regulator such as those which need to force the * regulator off for correct operation of the hardware they are * controlling. * * Use of supply names configured via set_consumer_device_supply() is * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. * * Return: Pointer to a &struct regulator corresponding to the regulator * producer, or an ERR_PTR() encoded negative error number. */ struct regulator *regulator_get_exclusive(struct device *dev, const char *id) { return _regulator_get(dev, id, EXCLUSIVE_GET); } EXPORT_SYMBOL_GPL(regulator_get_exclusive); /** * regulator_get_optional - obtain optional access to a regulator. * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * * This is intended for use by consumers for devices which can have * some supplies unconnected in normal use, such as some MMC devices. * It can allow the regulator core to provide stub supplies for other * supplies requested using normal regulator_get() calls without * disrupting the operation of drivers that can handle absent * supplies. * * Use of supply names configured via set_consumer_device_supply() is * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. * * Return: Pointer to a &struct regulator corresponding to the regulator * producer, or an ERR_PTR() encoded negative error number. 
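 *
 * A common consumer pattern (sketch only; "vqmmc" is a hypothetical
 * supply name) is to honour probe deferral but treat a permanent
 * lookup failure as "supply not wired":
 *
 *	vqmmc = regulator_get_optional(dev, "vqmmc");
 *	if (IS_ERR(vqmmc)) {
 *		if (PTR_ERR(vqmmc) == -EPROBE_DEFER)
 *			return -EPROBE_DEFER;
 *		vqmmc = NULL;	... supply genuinely absent ...
 *	}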
*/ struct regulator *regulator_get_optional(struct device *dev, const char *id) { return _regulator_get(dev, id, OPTIONAL_GET); } EXPORT_SYMBOL_GPL(regulator_get_optional); static void destroy_regulator(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; debugfs_remove_recursive(regulator->debugfs); if (regulator->dev) { if (regulator->device_link) device_link_remove(regulator->dev, &rdev->dev); /* remove any sysfs entries */ sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); } regulator_lock(rdev); list_del(&regulator->list); rdev->open_count--; rdev->exclusive = 0; regulator_unlock(rdev); kfree_const(regulator->supply_name); kfree(regulator); } /* regulator_list_mutex lock held by regulator_put() */ static void _regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; if (IS_ERR_OR_NULL(regulator)) return; lockdep_assert_held_once(&regulator_list_mutex); /* Docs say you must disable before calling regulator_put() */ WARN_ON(regulator->enable_count); rdev = regulator->rdev; destroy_regulator(regulator); module_put(rdev->owner); put_device(&rdev->dev); } /** * regulator_put - "free" the regulator source * @regulator: regulator source * * Note: drivers must ensure that all regulator_enable calls made on this * regulator source are balanced by regulator_disable calls prior to calling * this function. */ void regulator_put(struct regulator *regulator) { mutex_lock(&regulator_list_mutex); _regulator_put(regulator); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_put); /** * regulator_register_supply_alias - Provide device alias for supply lookup * * @dev: device that will be given as the regulator "consumer" * @id: Supply name or regulator ID * @alias_dev: device that should be used to lookup the supply * @alias_id: Supply name or regulator ID that should be used to lookup the * supply * * All lookups for id on dev will instead be conducted for alias_id on * alias_dev. * * Return: 0 on success or a negative error number on failure. */ int regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id) { struct regulator_supply_alias *map; map = regulator_find_supply_alias(dev, id); if (map) return -EEXIST; map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL); if (!map) return -ENOMEM; map->src_dev = dev; map->src_supply = id; map->alias_dev = alias_dev; map->alias_supply = alias_id; list_add(&map->list, &regulator_supply_alias_list); pr_info("Adding alias for supply %s,%s -> %s,%s\n", id, dev_name(dev), alias_id, dev_name(alias_dev)); return 0; } EXPORT_SYMBOL_GPL(regulator_register_supply_alias); /** * regulator_unregister_supply_alias - Remove device alias * * @dev: device that will be given as the regulator "consumer" * @id: Supply name or regulator ID * * Remove a lookup alias if one exists for id on dev. 
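 *
 * Typically paired with regulator_register_supply_alias() (sketch;
 * pdev and pmic_dev are hypothetical devices):
 *
 *	ret = regulator_register_supply_alias(&pdev->dev, "vdd",
 *					      pmic_dev, "ldo1");
 *	if (ret)
 *		return ret;
 *	...
 *	regulator_unregister_supply_alias(&pdev->dev, "vdd");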
*/ void regulator_unregister_supply_alias(struct device *dev, const char *id) { struct regulator_supply_alias *map; map = regulator_find_supply_alias(dev, id); if (map) { list_del(&map->list); kfree(map); } } EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias); /** * regulator_bulk_register_supply_alias - register multiple aliases * * @dev: device that will be given as the regulator "consumer" * @id: List of supply names or regulator IDs * @alias_dev: device that should be used to lookup the supply * @alias_id: List of supply names or regulator IDs that should be used to * lookup the supply * @num_id: Number of aliases to register * * This helper function allows drivers to register several supply * aliases in one operation. If any of the aliases cannot be * registered any aliases that were registered will be removed * before returning to the caller. * * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, const char *const *alias_id, int num_id) { int i; int ret; for (i = 0; i < num_id; ++i) { ret = regulator_register_supply_alias(dev, id[i], alias_dev, alias_id[i]); if (ret < 0) goto err; } return 0; err: dev_err(dev, "Failed to create supply alias %s,%s -> %s,%s\n", id[i], dev_name(dev), alias_id[i], dev_name(alias_dev)); while (--i >= 0) regulator_unregister_supply_alias(dev, id[i]); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias); /** * regulator_bulk_unregister_supply_alias - unregister multiple aliases * * @dev: device that will be given as the regulator "consumer" * @id: List of supply names or regulator IDs * @num_id: Number of aliases to unregister * * This helper function allows drivers to unregister several supply * aliases in one operation. */ void regulator_bulk_unregister_supply_alias(struct device *dev, const char *const *id, int num_id) { int i; for (i = 0; i < num_id; ++i) regulator_unregister_supply_alias(dev, id[i]); } EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias); /* Manage enable GPIO list. Same GPIO pin can be shared among regulators */ static int regulator_ena_gpio_request(struct regulator_dev *rdev, const struct regulator_config *config) { struct regulator_enable_gpio *pin, *new_pin; struct gpio_desc *gpiod; gpiod = config->ena_gpiod; new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL); mutex_lock(&regulator_list_mutex); list_for_each_entry(pin, &regulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { rdev_dbg(rdev, "GPIO is already used\n"); goto update_ena_gpio_to_rdev; } } if (new_pin == NULL) { mutex_unlock(&regulator_list_mutex); return -ENOMEM; } pin = new_pin; new_pin = NULL; pin->gpiod = gpiod; list_add(&pin->list, &regulator_ena_gpio_list); update_ena_gpio_to_rdev: pin->request_count++; rdev->ena_pin = pin; mutex_unlock(&regulator_list_mutex); kfree(new_pin); return 0; } static void regulator_ena_gpio_free(struct regulator_dev *rdev) { struct regulator_enable_gpio *pin, *n; if (!rdev->ena_pin) return; /* Free the GPIO only in case of no use */ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) { if (pin != rdev->ena_pin) continue; if (--pin->request_count) break; gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); break; } rdev->ena_pin = NULL; } /** * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control * @rdev: regulator_dev structure * @enable: enable GPIO at initial use? * * GPIO is enabled in case of initial use. 
(enable_count is 0) * GPIO is disabled when it is not shared any more. (enable_count <= 1) * * Return: 0 on success or a negative error number on failure. */ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) { struct regulator_enable_gpio *pin = rdev->ena_pin; if (!pin) return -EINVAL; if (enable) { /* Enable GPIO at initial use */ if (pin->enable_count == 0) gpiod_set_value_cansleep(pin->gpiod, 1); pin->enable_count++; } else { if (pin->enable_count > 1) { pin->enable_count--; return 0; } /* Disable GPIO if not used */ if (pin->enable_count <= 1) { gpiod_set_value_cansleep(pin->gpiod, 0); pin->enable_count = 0; } } return 0; } /** * _regulator_check_status_enabled - check if regulator status can be * interpreted as "regulator is enabled" * @rdev: the regulator device to check * * Return: * * 1 - if status shows regulator is in enabled state * * 0 - if not enabled state * * Error Value - as received from ops->get_status() */ static inline int _regulator_check_status_enabled(struct regulator_dev *rdev) { int ret = rdev->desc->ops->get_status(rdev); if (ret < 0) { rdev_info(rdev, "get_status returned error: %d\n", ret); return ret; } switch (ret) { case REGULATOR_STATUS_OFF: case REGULATOR_STATUS_ERROR: case REGULATOR_STATUS_UNDEFINED: return 0; default: return 1; } } static int _regulator_do_enable(struct regulator_dev *rdev) { int ret, delay; /* Query before enabling in case configuration dependent. */ ret = _regulator_get_enable_time(rdev); if (ret >= 0) { delay = ret; } else { rdev_warn(rdev, "enable_time() failed: %pe\n", ERR_PTR(ret)); delay = 0; } trace_regulator_enable(rdev_get_name(rdev)); if (rdev->desc->off_on_delay) { /* if needed, keep a distance of off_on_delay from last time * this regulator was disabled. */ ktime_t end = ktime_add_us(rdev->last_off, rdev->desc->off_on_delay); s64 remaining = ktime_us_delta(end, ktime_get_boottime()); if (remaining > 0) fsleep(remaining); } if (rdev->ena_pin) { if (!rdev->ena_gpio_state) { ret = regulator_ena_gpio_ctrl(rdev, true); if (ret < 0) return ret; rdev->ena_gpio_state = 1; } } else if (rdev->desc->ops->enable) { ret = rdev->desc->ops->enable(rdev); if (ret < 0) return ret; } else { return -EINVAL; } /* Allow the regulator to ramp; it would be useful to extend * this for bulk operations so that the regulators can ramp * together. */ trace_regulator_enable_delay(rdev_get_name(rdev)); /* If poll_enabled_time is set, poll upto the delay calculated * above, delaying poll_enabled_time uS to check if the regulator * actually got enabled. * If the regulator isn't enabled after our delay helper has expired, * return -ETIMEDOUT. */ if (rdev->desc->poll_enabled_time) { int time_remaining = delay; while (time_remaining > 0) { fsleep(rdev->desc->poll_enabled_time); if (rdev->desc->ops->get_status) { ret = _regulator_check_status_enabled(rdev); if (ret < 0) return ret; else if (ret) break; } else if (rdev->desc->ops->is_enabled(rdev)) break; time_remaining -= rdev->desc->poll_enabled_time; } if (time_remaining <= 0) { rdev_err(rdev, "Enabled check timed out\n"); return -ETIMEDOUT; } } else { fsleep(delay); } trace_regulator_enable_complete(rdev_get_name(rdev)); return 0; } /** * _regulator_handle_consumer_enable - handle that a consumer enabled * @regulator: regulator source * * Some things on a regulator consumer (like the contribution towards total * load on the regulator) only have an effect when the consumer wants the * regulator enabled. 
Explained in example with two consumers of the same * regulator: * consumer A: set_load(100); => total load = 0 * consumer A: regulator_enable(); => total load = 100 * consumer B: set_load(1000); => total load = 100 * consumer B: regulator_enable(); => total load = 1100 * consumer A: regulator_disable(); => total_load = 1000 * * This function (together with _regulator_handle_consumer_disable) is * responsible for keeping track of the refcount for a given regulator consumer * and applying / unapplying these things. * * Return: 0 on success or negative error number on failure. */ static int _regulator_handle_consumer_enable(struct regulator *regulator) { int ret; struct regulator_dev *rdev = regulator->rdev; lockdep_assert_held_once(&rdev->mutex.base); regulator->enable_count++; if (regulator->uA_load && regulator->enable_count == 1) { ret = drms_uA_update(rdev); if (ret) regulator->enable_count--; return ret; } return 0; } /** * _regulator_handle_consumer_disable - handle that a consumer disabled * @regulator: regulator source * * The opposite of _regulator_handle_consumer_enable(). * * Return: 0 on success or a negative error number on failure. */ static int _regulator_handle_consumer_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; lockdep_assert_held_once(&rdev->mutex.base); if (!regulator->enable_count) { rdev_err(rdev, "Underflow of regulator enable count\n"); return -EINVAL; } regulator->enable_count--; if (regulator->uA_load && regulator->enable_count == 0) return drms_uA_update(rdev); return 0; } /* locks held by regulator_enable() */ static int _regulator_enable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; int ret; lockdep_assert_held_once(&rdev->mutex.base); if (rdev->use_count == 0 && rdev->supply) { ret = _regulator_enable(rdev->supply); if (ret < 0) return ret; } /* balance only if there are regulators coupled */ if (rdev->coupling_desc.n_coupled > 1) { ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON); if (ret < 0) goto err_disable_supply; } ret = _regulator_handle_consumer_enable(regulator); if (ret < 0) goto err_disable_supply; if (rdev->use_count == 0) { /* * The regulator may already be enabled if it's not switchable * or was left on */ ret = _regulator_is_enabled(rdev); if (ret == -EINVAL || ret == 0) { if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) { ret = -EPERM; goto err_consumer_disable; } ret = _regulator_do_enable(rdev); if (ret < 0) goto err_consumer_disable; _notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE, NULL); } else if (ret < 0) { rdev_err(rdev, "is_enabled() failed: %pe\n", ERR_PTR(ret)); goto err_consumer_disable; } /* Fallthrough on positive return values - already enabled */ } if (regulator->enable_count == 1) rdev->use_count++; return 0; err_consumer_disable: _regulator_handle_consumer_disable(regulator); err_disable_supply: if (rdev->use_count == 0 && rdev->supply) _regulator_disable(rdev->supply); return ret; } /** * regulator_enable - enable regulator output * @regulator: regulator source * * Request that the regulator be enabled with the regulator output at * the predefined voltage or current value. Calls to regulator_enable() * must be balanced with calls to regulator_disable(). * * NOTE: the output value can be set by other drivers, boot loader or may be * hardwired in the regulator. * * Return: 0 on success or a negative error number on failure. 
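 *
 * A minimal usage sketch (hypothetical consumer code; "vcc" is an
 * illustrative supply name). Every successful regulator_enable() must
 * eventually be matched by a regulator_disable():
 *
 *	vcc = regulator_get(dev, "vcc");
 *	if (IS_ERR(vcc))
 *		return PTR_ERR(vcc);
 *
 *	ret = regulator_enable(vcc);
 *	if (ret)
 *		return ret;
 *	...
 *	regulator_disable(vcc);
 *	regulator_put(vcc);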
*/ int regulator_enable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; struct ww_acquire_ctx ww_ctx; int ret; regulator_lock_dependent(rdev, &ww_ctx); ret = _regulator_enable(regulator); regulator_unlock_dependent(rdev, &ww_ctx); return ret; } EXPORT_SYMBOL_GPL(regulator_enable); static int _regulator_do_disable(struct regulator_dev *rdev) { int ret; trace_regulator_disable(rdev_get_name(rdev)); if (rdev->ena_pin) { if (rdev->ena_gpio_state) { ret = regulator_ena_gpio_ctrl(rdev, false); if (ret < 0) return ret; rdev->ena_gpio_state = 0; } } else if (rdev->desc->ops->disable) { ret = rdev->desc->ops->disable(rdev); if (ret != 0) return ret; } if (rdev->desc->off_on_delay) rdev->last_off = ktime_get_boottime(); trace_regulator_disable_complete(rdev_get_name(rdev)); return 0; } /* locks held by regulator_disable() */ static int _regulator_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; int ret = 0; lockdep_assert_held_once(&rdev->mutex.base); if (WARN(regulator->enable_count == 0, "unbalanced disables for %s\n", rdev_get_name(rdev))) return -EIO; if (regulator->enable_count == 1) { /* disabling last enable_count from this regulator */ /* are we the last user and permitted to disable ? */ if (rdev->use_count == 1 && (rdev->constraints && !rdev->constraints->always_on)) { /* we are last user */ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) { ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_DISABLE, NULL); if (ret & NOTIFY_STOP_MASK) return -EINVAL; ret = _regulator_do_disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret)); _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_DISABLE, NULL); return ret; } _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, NULL); } rdev->use_count = 0; } else if (rdev->use_count > 1) { rdev->use_count--; } } if (ret == 0) ret = _regulator_handle_consumer_disable(regulator); if (ret == 0 && rdev->coupling_desc.n_coupled > 1) ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON); if (ret == 0 && rdev->use_count == 0 && rdev->supply) ret = _regulator_disable(rdev->supply); return ret; } /** * regulator_disable - disable regulator output * @regulator: regulator source * * Disable the regulator output voltage or current. Calls to * regulator_enable() must be balanced with calls to * regulator_disable(). * * NOTE: this will only disable the regulator output if no other consumer * devices have it enabled, the regulator device supports disabling and * machine constraints permit this operation. * * Return: 0 on success or a negative error number on failure. 
*/ int regulator_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; struct ww_acquire_ctx ww_ctx; int ret; regulator_lock_dependent(rdev, &ww_ctx); ret = _regulator_disable(regulator); regulator_unlock_dependent(rdev, &ww_ctx); return ret; } EXPORT_SYMBOL_GPL(regulator_disable); /* locks held by regulator_force_disable() */ static int _regulator_force_disable(struct regulator_dev *rdev) { int ret = 0; lockdep_assert_held_once(&rdev->mutex.base); ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | REGULATOR_EVENT_PRE_DISABLE, NULL); if (ret & NOTIFY_STOP_MASK) return -EINVAL; ret = _regulator_do_disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to force disable: %pe\n", ERR_PTR(ret)); _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | REGULATOR_EVENT_ABORT_DISABLE, NULL); return ret; } _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | REGULATOR_EVENT_DISABLE, NULL); return 0; } /** * regulator_force_disable - force disable regulator output * @regulator: regulator source * * Forcibly disable the regulator output voltage or current. * NOTE: this *will* disable the regulator output even if other consumer * devices have it enabled. This should be used for situations when device * damage will likely occur if the regulator is not disabled (e.g. over temp). * * Return: 0 on success or a negative error number on failure. */ int regulator_force_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; struct ww_acquire_ctx ww_ctx; int ret; regulator_lock_dependent(rdev, &ww_ctx); ret = _regulator_force_disable(regulator->rdev); if (rdev->coupling_desc.n_coupled > 1) regulator_balance_voltage(rdev, PM_SUSPEND_ON); if (regulator->uA_load) { regulator->uA_load = 0; ret = drms_uA_update(rdev); } if (rdev->use_count != 0 && rdev->supply) _regulator_disable(rdev->supply); regulator_unlock_dependent(rdev, &ww_ctx); return ret; } EXPORT_SYMBOL_GPL(regulator_force_disable); static void regulator_disable_work(struct work_struct *work) { struct regulator_dev *rdev = container_of(work, struct regulator_dev, disable_work.work); struct ww_acquire_ctx ww_ctx; int count, i, ret; struct regulator *regulator; int total_count = 0; regulator_lock_dependent(rdev, &ww_ctx); /* * Workqueue functions queue the new work instance while the previous * work instance is being processed. Cancel the queued work instance * as the work instance under processing does the job of the queued * work instance. */ cancel_delayed_work(&rdev->disable_work); list_for_each_entry(regulator, &rdev->consumer_list, list) { count = regulator->deferred_disables; if (!count) continue; total_count += count; regulator->deferred_disables = 0; for (i = 0; i < count; i++) { ret = _regulator_disable(regulator); if (ret != 0) rdev_err(rdev, "Deferred disable failed: %pe\n", ERR_PTR(ret)); } } WARN_ON(!total_count); if (rdev->coupling_desc.n_coupled > 1) regulator_balance_voltage(rdev, PM_SUSPEND_ON); regulator_unlock_dependent(rdev, &ww_ctx); } /** * regulator_disable_deferred - disable regulator output with delay * @regulator: regulator source * @ms: milliseconds until the regulator is disabled * * Execute regulator_disable() on the regulator after a delay. This * is intended for use with devices that require some time to quiesce. * * NOTE: this will only disable the regulator output if no other consumer * devices have it enabled, the regulator device supports disabling and * machine constraints permit this operation. 
* * Return: 0 on success or a negative error number on failure. */ int regulator_disable_deferred(struct regulator *regulator, int ms) { struct regulator_dev *rdev = regulator->rdev; if (!ms) return regulator_disable(regulator); regulator_lock(rdev); regulator->deferred_disables++; mod_delayed_work(system_power_efficient_wq, &rdev->disable_work, msecs_to_jiffies(ms)); regulator_unlock(rdev); return 0; } EXPORT_SYMBOL_GPL(regulator_disable_deferred); static int _regulator_is_enabled(struct regulator_dev *rdev) { /* A GPIO control always takes precedence */ if (rdev->ena_pin) return rdev->ena_gpio_state; /* If we don't know then assume that the regulator is always on */ if (!rdev->desc->ops->is_enabled) return 1; return rdev->desc->ops->is_enabled(rdev); } static int _regulator_list_voltage(struct regulator_dev *rdev, unsigned selector, int lock) { const struct regulator_ops *ops = rdev->desc->ops; int ret; if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector) return rdev->desc->fixed_uV; if (ops->list_voltage) { if (selector >= rdev->desc->n_voltages) return -EINVAL; if (selector < rdev->desc->linear_min_sel) return 0; if (lock) regulator_lock(rdev); ret = ops->list_voltage(rdev, selector); if (lock) regulator_unlock(rdev); } else if (rdev->is_switch && rdev->supply) { ret = _regulator_list_voltage(rdev->supply->rdev, selector, lock); } else { return -EINVAL; } if (ret > 0) { if (ret < rdev->constraints->min_uV) ret = 0; else if (ret > rdev->constraints->max_uV) ret = 0; } return ret; } /** * regulator_is_enabled - is the regulator output enabled * @regulator: regulator source * * Note that the device backing this regulator handle can have multiple * users, so it might be enabled even if regulator_enable() was never * called for this particular source. * * Return: Positive if the regulator driver backing the source/client * has requested that the device be enabled, zero if it hasn't, * else a negative error number. */ int regulator_is_enabled(struct regulator *regulator) { int ret; if (regulator->always_on) return 1; regulator_lock(regulator->rdev); ret = _regulator_is_enabled(regulator->rdev); regulator_unlock(regulator->rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_is_enabled); /** * regulator_count_voltages - count regulator_list_voltage() selectors * @regulator: regulator source * * Return: Number of selectors for @regulator, or negative error number. * * Selectors are numbered starting at zero, and typically correspond to * bitfields in hardware registers. */ int regulator_count_voltages(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; if (rdev->desc->n_voltages) return rdev->desc->n_voltages; if (!rdev->is_switch || !rdev->supply) return -EINVAL; return regulator_count_voltages(rdev->supply); } EXPORT_SYMBOL_GPL(regulator_count_voltages); /** * regulator_list_voltage - enumerate supported voltages * @regulator: regulator source * @selector: identify voltage to list * Context: can sleep * * Return: Voltage for @selector that can be passed to regulator_set_voltage(), * 0 if @selector can't be used on this system, or a negative error * number on failure. 
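 *
 * A short enumeration sketch (hypothetical consumer code): walk all
 * selectors, skipping entries that are unusable on this system; any
 * non-zero voltage returned here may be fed to regulator_set_voltage():
 *
 *	n = regulator_count_voltages(reg);
 *	for (sel = 0; sel < n; sel++) {
 *		uV = regulator_list_voltage(reg, sel);
 *		if (uV <= 0)
 *			continue;
 *		...
 *	}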
*/ int regulator_list_voltage(struct regulator *regulator, unsigned selector) { return _regulator_list_voltage(regulator->rdev, selector, 1); } EXPORT_SYMBOL_GPL(regulator_list_voltage); /** * regulator_get_regmap - get the regulator's register map * @regulator: regulator source * * Return: Pointer to the &struct regmap for @regulator, or ERR_PTR() * encoded -%EOPNOTSUPP if @regulator doesn't use regmap. */ struct regmap *regulator_get_regmap(struct regulator *regulator) { struct regmap *map = regulator->rdev->regmap; return map ? map : ERR_PTR(-EOPNOTSUPP); } EXPORT_SYMBOL_GPL(regulator_get_regmap); /** * regulator_get_hardware_vsel_register - get the HW voltage selector register * @regulator: regulator source * @vsel_reg: voltage selector register, output parameter * @vsel_mask: mask for voltage selector bitfield, output parameter * * Returns the hardware register offset and bitmask used for setting the * regulator voltage. This might be useful when configuring voltage-scaling * hardware or firmware that can make I2C requests behind the kernel's back, * for example. * * Return: 0 on success, or -%EOPNOTSUPP if the regulator does not support * voltage selectors. * * On success, the output parameters @vsel_reg and @vsel_mask are filled in * and 0 is returned, otherwise a negative error number is returned. */ int regulator_get_hardware_vsel_register(struct regulator *regulator, unsigned *vsel_reg, unsigned *vsel_mask) { struct regulator_dev *rdev = regulator->rdev; const struct regulator_ops *ops = rdev->desc->ops; if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap) return -EOPNOTSUPP; *vsel_reg = rdev->desc->vsel_reg; *vsel_mask = rdev->desc->vsel_mask; return 0; } EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register); /** * regulator_list_hardware_vsel - get the HW-specific register value for a selector * @regulator: regulator source * @selector: identify voltage to list * * Converts the selector to a hardware-specific voltage selector that can be * directly written to the regulator registers. The address of the voltage * register can be determined by calling @regulator_get_hardware_vsel_register. * * Return: 0 on success, -%EINVAL if the selector is outside the supported * range, or -%EOPNOTSUPP if the regulator does not support voltage * selectors. */ int regulator_list_hardware_vsel(struct regulator *regulator, unsigned selector) { struct regulator_dev *rdev = regulator->rdev; const struct regulator_ops *ops = rdev->desc->ops; if (selector >= rdev->desc->n_voltages) return -EINVAL; if (selector < rdev->desc->linear_min_sel) return 0; if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap) return -EOPNOTSUPP; return selector; } EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel); /** * regulator_hardware_enable - access the HW for enable/disable regulator * @regulator: regulator source * @enable: true for enable, false for disable * * Request that the regulator be enabled/disabled with the regulator output at * the predefined voltage or current value. * * Return: 0 on success or a negative error number on failure. 
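 *
 * A brief sketch (hypothetical consumer code): this only works on a
 * regulator obtained with regulator_get_exclusive(), and the core does
 * not track the resulting state change:
 *
 *	reg = regulator_get_exclusive(dev, "vdd-core");
 *	if (IS_ERR(reg))
 *		return PTR_ERR(reg);
 *
 *	ret = regulator_hardware_enable(reg, true);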
*/ int regulator_hardware_enable(struct regulator *regulator, bool enable) { struct regulator_dev *rdev = regulator->rdev; const struct regulator_ops *ops = rdev->desc->ops; int ret = -EOPNOTSUPP; if (!rdev->exclusive || !ops || !ops->enable || !ops->disable) return ret; if (enable) ret = ops->enable(rdev); else ret = ops->disable(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_hardware_enable); /** * regulator_get_linear_step - return the voltage step size between VSEL values * @regulator: regulator source * * Return: The voltage step size between VSEL values for linear regulators, * or 0 if the regulator isn't a linear regulator. */ unsigned int regulator_get_linear_step(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; return rdev->desc->uV_step; } EXPORT_SYMBOL_GPL(regulator_get_linear_step); /** * regulator_is_supported_voltage - check if a voltage range can be supported * * @regulator: Regulator to check. * @min_uV: Minimum required voltage in uV. * @max_uV: Maximum required voltage in uV. * * Return: 1 if the voltage range is supported, 0 if not, or a negative error * number if @regulator's voltage can't be changed and voltage readback * failed. */ int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { struct regulator_dev *rdev = regulator->rdev; int i, voltages, ret; /* If we can't change voltage check the current voltage */ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) { ret = regulator_get_voltage(regulator); if (ret >= 0) return min_uV <= ret && ret <= max_uV; else return ret; } /* Any voltage within constrains range is fine? */ if (rdev->desc->continuous_voltage_range) return min_uV >= rdev->constraints->min_uV && max_uV <= rdev->constraints->max_uV; ret = regulator_count_voltages(regulator); if (ret < 0) return 0; voltages = ret; for (i = 0; i < voltages; i++) { ret = regulator_list_voltage(regulator, i); if (ret >= min_uV && ret <= max_uV) return 1; } return 0; } EXPORT_SYMBOL_GPL(regulator_is_supported_voltage); static int regulator_map_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { const struct regulator_desc *desc = rdev->desc; if (desc->ops->map_voltage) return desc->ops->map_voltage(rdev, min_uV, max_uV); if (desc->ops->list_voltage == regulator_list_voltage_linear) return regulator_map_voltage_linear(rdev, min_uV, max_uV); if (desc->ops->list_voltage == regulator_list_voltage_linear_range) return regulator_map_voltage_linear_range(rdev, min_uV, max_uV); if (desc->ops->list_voltage == regulator_list_voltage_pickable_linear_range) return regulator_map_voltage_pickable_linear_range(rdev, min_uV, max_uV); return regulator_map_voltage_iterate(rdev, min_uV, max_uV); } static int _regulator_call_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct pre_voltage_change_data data; int ret; data.old_uV = regulator_get_voltage_rdev(rdev); data.min_uV = min_uV; data.max_uV = max_uV; ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE, &data); if (ret & NOTIFY_STOP_MASK) return -EINVAL; ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector); if (ret >= 0) return ret; _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE, (void *)data.old_uV); return ret; } static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev, int uV, unsigned selector) { struct pre_voltage_change_data data; int ret; data.old_uV = regulator_get_voltage_rdev(rdev); data.min_uV = uV; data.max_uV = uV; ret = _notifier_call_chain(rdev, 
REGULATOR_EVENT_PRE_VOLTAGE_CHANGE, &data); if (ret & NOTIFY_STOP_MASK) return -EINVAL; ret = rdev->desc->ops->set_voltage_sel(rdev, selector); if (ret >= 0) return ret; _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE, (void *)data.old_uV); return ret; } static int _regulator_set_voltage_sel_step(struct regulator_dev *rdev, int uV, int new_selector) { const struct regulator_ops *ops = rdev->desc->ops; int diff, old_sel, curr_sel, ret; /* Stepping is only needed if the regulator is enabled. */ if (!_regulator_is_enabled(rdev)) goto final_set; if (!ops->get_voltage_sel) return -EINVAL; old_sel = ops->get_voltage_sel(rdev); if (old_sel < 0) return old_sel; diff = new_selector - old_sel; if (diff == 0) return 0; /* No change needed. */ if (diff > 0) { /* Stepping up. */ for (curr_sel = old_sel + rdev->desc->vsel_step; curr_sel < new_selector; curr_sel += rdev->desc->vsel_step) { /* * Call the callback directly instead of using * _regulator_call_set_voltage_sel() as we don't * want to notify anyone yet. Same in the branch * below. */ ret = ops->set_voltage_sel(rdev, curr_sel); if (ret) goto try_revert; } } else { /* Stepping down. */ for (curr_sel = old_sel - rdev->desc->vsel_step; curr_sel > new_selector; curr_sel -= rdev->desc->vsel_step) { ret = ops->set_voltage_sel(rdev, curr_sel); if (ret) goto try_revert; } } final_set: /* The final selector will trigger the notifiers. */ return _regulator_call_set_voltage_sel(rdev, uV, new_selector); try_revert: /* * At least try to return to the previous voltage if setting a new * one failed. */ (void)ops->set_voltage_sel(rdev, old_sel); return ret; } static int _regulator_set_voltage_time(struct regulator_dev *rdev, int old_uV, int new_uV) { unsigned int ramp_delay = 0; if (rdev->constraints->ramp_delay) ramp_delay = rdev->constraints->ramp_delay; else if (rdev->desc->ramp_delay) ramp_delay = rdev->desc->ramp_delay; else if (rdev->constraints->settling_time) return rdev->constraints->settling_time; else if (rdev->constraints->settling_time_up && (new_uV > old_uV)) return rdev->constraints->settling_time_up; else if (rdev->constraints->settling_time_down && (new_uV < old_uV)) return rdev->constraints->settling_time_down; if (ramp_delay == 0) return 0; return DIV_ROUND_UP(abs(new_uV - old_uV), ramp_delay); } static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { int ret; int delay = 0; int best_val = 0; unsigned int selector; int old_selector = -1; const struct regulator_ops *ops = rdev->desc->ops; int old_uV = regulator_get_voltage_rdev(rdev); trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); min_uV += rdev->constraints->uV_offset; max_uV += rdev->constraints->uV_offset; /* * If we can't obtain the old selector there is not enough * info to call set_voltage_time_sel(). 
*/ if (_regulator_is_enabled(rdev) && ops->set_voltage_time_sel && ops->get_voltage_sel) { old_selector = ops->get_voltage_sel(rdev); if (old_selector < 0) return old_selector; } if (ops->set_voltage) { ret = _regulator_call_set_voltage(rdev, min_uV, max_uV, &selector); if (ret >= 0) { if (ops->list_voltage) best_val = ops->list_voltage(rdev, selector); else best_val = regulator_get_voltage_rdev(rdev); } } else if (ops->set_voltage_sel) { ret = regulator_map_voltage(rdev, min_uV, max_uV); if (ret >= 0) { best_val = ops->list_voltage(rdev, ret); if (min_uV <= best_val && max_uV >= best_val) { selector = ret; if (old_selector == selector) ret = 0; else if (rdev->desc->vsel_step) ret = _regulator_set_voltage_sel_step( rdev, best_val, selector); else ret = _regulator_call_set_voltage_sel( rdev, best_val, selector); } else { ret = -EINVAL; } } } else { ret = -EINVAL; } if (ret) goto out; if (ops->set_voltage_time_sel) { /* * Call set_voltage_time_sel if successfully obtained * old_selector */ if (old_selector >= 0 && old_selector != selector) delay = ops->set_voltage_time_sel(rdev, old_selector, selector); } else { if (old_uV != best_val) { if (ops->set_voltage_time) delay = ops->set_voltage_time(rdev, old_uV, best_val); else delay = _regulator_set_voltage_time(rdev, old_uV, best_val); } } if (delay < 0) { rdev_warn(rdev, "failed to get delay: %pe\n", ERR_PTR(delay)); delay = 0; } /* Insert any necessary delays */ fsleep(delay); if (best_val >= 0) { unsigned long data = best_val; _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, (void *)data); } out: trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val); return ret; } static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, suspend_state_t state) { struct regulator_state *rstate; int uV, sel; rstate = regulator_get_suspend_state(rdev, state); if (rstate == NULL) return -EINVAL; if (min_uV < rstate->min_uV) min_uV = rstate->min_uV; if (max_uV > rstate->max_uV) max_uV = rstate->max_uV; sel = regulator_map_voltage(rdev, min_uV, max_uV); if (sel < 0) return sel; uV = rdev->desc->ops->list_voltage(rdev, sel); if (uV >= min_uV && uV <= max_uV) rstate->uV = uV; return 0; } static int regulator_set_voltage_unlocked(struct regulator *regulator, int min_uV, int max_uV, suspend_state_t state) { struct regulator_dev *rdev = regulator->rdev; struct regulator_voltage *voltage = &regulator->voltage[state]; int ret = 0; int old_min_uV, old_max_uV; int current_uV; /* If we're setting the same range as last time the change * should be a noop (some cpufreq implementations use the same * voltage for multiple frequencies, for example). */ if (voltage->min_uV == min_uV && voltage->max_uV == max_uV) goto out; /* If we're trying to set a range that overlaps the current voltage, * return successfully even though the regulator does not support * changing the voltage. 
*/ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) { current_uV = regulator_get_voltage_rdev(rdev); if (min_uV <= current_uV && current_uV <= max_uV) { voltage->min_uV = min_uV; voltage->max_uV = max_uV; goto out; } } /* sanity check */ if (!rdev->desc->ops->set_voltage && !rdev->desc->ops->set_voltage_sel) { ret = -EINVAL; goto out; } /* constraints check */ ret = regulator_check_voltage(rdev, &min_uV, &max_uV); if (ret < 0) goto out; /* restore original values in case of error */ old_min_uV = voltage->min_uV; old_max_uV = voltage->max_uV; voltage->min_uV = min_uV; voltage->max_uV = max_uV; /* for not coupled regulators this will just set the voltage */ ret = regulator_balance_voltage(rdev, state); if (ret < 0) { voltage->min_uV = old_min_uV; voltage->max_uV = old_max_uV; } out: return ret; } int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, int max_uV, suspend_state_t state) { int best_supply_uV = 0; int supply_change_uV = 0; int ret; if (rdev->supply && regulator_ops_is_valid(rdev->supply->rdev, REGULATOR_CHANGE_VOLTAGE) && (rdev->desc->min_dropout_uV || !(rdev->desc->ops->get_voltage || rdev->desc->ops->get_voltage_sel))) { int current_supply_uV; int selector; selector = regulator_map_voltage(rdev, min_uV, max_uV); if (selector < 0) { ret = selector; goto out; } best_supply_uV = _regulator_list_voltage(rdev, selector, 0); if (best_supply_uV < 0) { ret = best_supply_uV; goto out; } best_supply_uV += rdev->desc->min_dropout_uV; current_supply_uV = regulator_get_voltage_rdev(rdev->supply->rdev); if (current_supply_uV < 0) { ret = current_supply_uV; goto out; } supply_change_uV = best_supply_uV - current_supply_uV; } if (supply_change_uV > 0) { ret = regulator_set_voltage_unlocked(rdev->supply, best_supply_uV, INT_MAX, state); if (ret) { dev_err(&rdev->dev, "Failed to increase supply voltage: %pe\n", ERR_PTR(ret)); goto out; } } if (state == PM_SUSPEND_ON) ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); else ret = _regulator_do_set_suspend_voltage(rdev, min_uV, max_uV, state); if (ret < 0) goto out; if (supply_change_uV < 0) { ret = regulator_set_voltage_unlocked(rdev->supply, best_supply_uV, INT_MAX, state); if (ret) dev_warn(&rdev->dev, "Failed to decrease supply voltage: %pe\n", ERR_PTR(ret)); /* No need to fail here */ ret = 0; } out: return ret; } EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev); static int regulator_limit_voltage_step(struct regulator_dev *rdev, int *current_uV, int *min_uV) { struct regulation_constraints *constraints = rdev->constraints; /* Limit voltage change only if necessary */ if (!constraints->max_uV_step || !_regulator_is_enabled(rdev)) return 1; if (*current_uV < 0) { *current_uV = regulator_get_voltage_rdev(rdev); if (*current_uV < 0) return *current_uV; } if (abs(*current_uV - *min_uV) <= constraints->max_uV_step) return 1; /* Clamp target voltage within the given step */ if (*current_uV < *min_uV) *min_uV = min(*current_uV + constraints->max_uV_step, *min_uV); else *min_uV = max(*current_uV - constraints->max_uV_step, *min_uV); return 0; } static int regulator_get_optimal_voltage(struct regulator_dev *rdev, int *current_uV, int *min_uV, int *max_uV, suspend_state_t state, int n_coupled) { struct coupling_desc *c_desc = &rdev->coupling_desc; struct regulator_dev **c_rdevs = c_desc->coupled_rdevs; struct regulation_constraints *constraints = rdev->constraints; int desired_min_uV = 0, desired_max_uV = INT_MAX; int max_current_uV = 0, min_current_uV = INT_MAX; int highest_min_uV = 0, target_uV, possible_uV; int i, ret, 
max_spread; bool done; *current_uV = -1; /* * If there are no coupled regulators, simply set the voltage * demanded by consumers. */ if (n_coupled == 1) { /* * If consumers don't provide any demands, set voltage * to min_uV */ desired_min_uV = constraints->min_uV; desired_max_uV = constraints->max_uV; ret = regulator_check_consumers(rdev, &desired_min_uV, &desired_max_uV, state); if (ret < 0) return ret; done = true; goto finish; } /* Find highest min desired voltage */ for (i = 0; i < n_coupled; i++) { int tmp_min = 0; int tmp_max = INT_MAX; lockdep_assert_held_once(&c_rdevs[i]->mutex.base); ret = regulator_check_consumers(c_rdevs[i], &tmp_min, &tmp_max, state); if (ret < 0) return ret; ret = regulator_check_voltage(c_rdevs[i], &tmp_min, &tmp_max); if (ret < 0) return ret; highest_min_uV = max(highest_min_uV, tmp_min); if (i == 0) { desired_min_uV = tmp_min; desired_max_uV = tmp_max; } } max_spread = constraints->max_spread[0]; /* * Let target_uV be equal to the desired one if possible. * If not, set it to minimum voltage, allowed by other coupled * regulators. */ target_uV = max(desired_min_uV, highest_min_uV - max_spread); /* * Find min and max voltages, which currently aren't violating * max_spread. */ for (i = 1; i < n_coupled; i++) { int tmp_act; if (!_regulator_is_enabled(c_rdevs[i])) continue; tmp_act = regulator_get_voltage_rdev(c_rdevs[i]); if (tmp_act < 0) return tmp_act; min_current_uV = min(tmp_act, min_current_uV); max_current_uV = max(tmp_act, max_current_uV); } /* There aren't any other regulators enabled */ if (max_current_uV == 0) { possible_uV = target_uV; } else { /* * Correct target voltage, so as it currently isn't * violating max_spread */ possible_uV = max(target_uV, max_current_uV - max_spread); possible_uV = min(possible_uV, min_current_uV + max_spread); } if (possible_uV > desired_max_uV) return -EINVAL; done = (possible_uV == target_uV); desired_min_uV = possible_uV; finish: /* Apply max_uV_step constraint if necessary */ if (state == PM_SUSPEND_ON) { ret = regulator_limit_voltage_step(rdev, current_uV, &desired_min_uV); if (ret < 0) return ret; if (ret == 0) done = false; } /* Set current_uV if wasn't done earlier in the code and if necessary */ if (n_coupled > 1 && *current_uV == -1) { if (_regulator_is_enabled(rdev)) { ret = regulator_get_voltage_rdev(rdev); if (ret < 0) return ret; *current_uV = ret; } else { *current_uV = desired_min_uV; } } *min_uV = desired_min_uV; *max_uV = desired_max_uV; return done; } int regulator_do_balance_voltage(struct regulator_dev *rdev, suspend_state_t state, bool skip_coupled) { struct regulator_dev **c_rdevs; struct regulator_dev *best_rdev; struct coupling_desc *c_desc = &rdev->coupling_desc; int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev; unsigned int delta, best_delta; unsigned long c_rdev_done = 0; bool best_c_rdev_done; c_rdevs = c_desc->coupled_rdevs; n_coupled = skip_coupled ? 1 : c_desc->n_coupled; /* * Find the best possible voltage change on each loop. Leave the loop * if there isn't any possible change. */ do { best_c_rdev_done = false; best_delta = 0; best_min_uV = 0; best_max_uV = 0; best_c_rdev = 0; best_rdev = NULL; /* * Find highest difference between optimal voltage * and current voltage. */ for (i = 0; i < n_coupled; i++) { /* * optimal_uV is the best voltage that can be set for * i-th regulator at the moment without violating * max_spread constraint in order to balance * the coupled voltages. 
*/ int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0; if (test_bit(i, &c_rdev_done)) continue; ret = regulator_get_optimal_voltage(c_rdevs[i], &current_uV, &optimal_uV, &optimal_max_uV, state, n_coupled); if (ret < 0) goto out; delta = abs(optimal_uV - current_uV); if (delta && best_delta <= delta) { best_c_rdev_done = ret; best_delta = delta; best_rdev = c_rdevs[i]; best_min_uV = optimal_uV; best_max_uV = optimal_max_uV; best_c_rdev = i; } } /* Nothing to change, return successfully */ if (!best_rdev) { ret = 0; goto out; } ret = regulator_set_voltage_rdev(best_rdev, best_min_uV, best_max_uV, state); if (ret < 0) goto out; if (best_c_rdev_done) set_bit(best_c_rdev, &c_rdev_done); } while (n_coupled > 1); out: return ret; } static int regulator_balance_voltage(struct regulator_dev *rdev, suspend_state_t state) { struct coupling_desc *c_desc = &rdev->coupling_desc; struct regulator_coupler *coupler = c_desc->coupler; bool skip_coupled = false; /* * If system is in a state other than PM_SUSPEND_ON, don't check * other coupled regulators. */ if (state != PM_SUSPEND_ON) skip_coupled = true; if (c_desc->n_resolved < c_desc->n_coupled) { rdev_err(rdev, "Not all coupled regulators registered\n"); return -EPERM; } /* Invoke custom balancer for customized couplers */ if (coupler && coupler->balance_voltage) return coupler->balance_voltage(coupler, rdev, state); return regulator_do_balance_voltage(rdev, state, skip_coupled); } /** * regulator_set_voltage - set regulator output voltage * @regulator: regulator source * @min_uV: Minimum required voltage in uV * @max_uV: Maximum acceptable voltage in uV * * Sets a voltage regulator to the desired output voltage. This can be set * during any regulator state. IOW, regulator can be disabled or enabled. * * If the regulator is enabled then the voltage will change to the new value * immediately otherwise if the regulator is disabled the regulator will * output at the new voltage when enabled. * * NOTE: If the regulator is shared between several devices then the lowest * request voltage that meets the system constraints will be used. * Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. * * Return: 0 on success or a negative error number on failure. */ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) { struct ww_acquire_ctx ww_ctx; int ret; regulator_lock_dependent(regulator->rdev, &ww_ctx); ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV, PM_SUSPEND_ON); regulator_unlock_dependent(regulator->rdev, &ww_ctx); return ret; } EXPORT_SYMBOL_GPL(regulator_set_voltage); static inline int regulator_suspend_toggle(struct regulator_dev *rdev, suspend_state_t state, bool en) { struct regulator_state *rstate; rstate = regulator_get_suspend_state(rdev, state); if (rstate == NULL) return -EINVAL; if (!rstate->changeable) return -EPERM; rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND; return 0; } int regulator_suspend_enable(struct regulator_dev *rdev, suspend_state_t state) { return regulator_suspend_toggle(rdev, state, true); } EXPORT_SYMBOL_GPL(regulator_suspend_enable); int regulator_suspend_disable(struct regulator_dev *rdev, suspend_state_t state) { struct regulator *regulator; struct regulator_voltage *voltage; /* * if any consumer wants this regulator device keeping on in * suspend states, don't set it as disabled. 
*/ list_for_each_entry(regulator, &rdev->consumer_list, list) { voltage = &regulator->voltage[state]; if (voltage->min_uV || voltage->max_uV) return 0; } return regulator_suspend_toggle(rdev, state, false); } EXPORT_SYMBOL_GPL(regulator_suspend_disable); static int _regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, int max_uV, suspend_state_t state) { struct regulator_dev *rdev = regulator->rdev; struct regulator_state *rstate; rstate = regulator_get_suspend_state(rdev, state); if (rstate == NULL) return -EINVAL; if (rstate->min_uV == rstate->max_uV) { rdev_err(rdev, "The suspend voltage can't be changed!\n"); return -EPERM; } return regulator_set_voltage_unlocked(regulator, min_uV, max_uV, state); } int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, int max_uV, suspend_state_t state) { struct ww_acquire_ctx ww_ctx; int ret; /* PM_SUSPEND_ON is handled by regulator_set_voltage() */ if (regulator_check_states(state) || state == PM_SUSPEND_ON) return -EINVAL; regulator_lock_dependent(regulator->rdev, &ww_ctx); ret = _regulator_set_suspend_voltage(regulator, min_uV, max_uV, state); regulator_unlock_dependent(regulator->rdev, &ww_ctx); return ret; } EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage); /** * regulator_set_voltage_time - get raise/fall time * @regulator: regulator source * @old_uV: starting voltage in microvolts * @new_uV: target voltage in microvolts * * Provided with the starting and ending voltage, this function attempts to * calculate the time in microseconds required to rise or fall to this new * voltage. * * Return: ramp time in microseconds, or a negative error number if calculation failed. */ int regulator_set_voltage_time(struct regulator *regulator, int old_uV, int new_uV) { struct regulator_dev *rdev = regulator->rdev; const struct regulator_ops *ops = rdev->desc->ops; int old_sel = -1; int new_sel = -1; int voltage; int i; if (ops->set_voltage_time) return ops->set_voltage_time(rdev, old_uV, new_uV); else if (!ops->set_voltage_time_sel) return _regulator_set_voltage_time(rdev, old_uV, new_uV); /* Currently requires operations to do this */ if (!ops->list_voltage || !rdev->desc->n_voltages) return -EINVAL; for (i = 0; i < rdev->desc->n_voltages; i++) { /* We only look for exact voltage matches here */ if (i < rdev->desc->linear_min_sel) continue; if (old_sel >= 0 && new_sel >= 0) break; voltage = regulator_list_voltage(regulator, i); if (voltage < 0) return -EINVAL; if (voltage == 0) continue; if (voltage == old_uV) old_sel = i; if (voltage == new_uV) new_sel = i; } if (old_sel < 0 || new_sel < 0) return -EINVAL; return ops->set_voltage_time_sel(rdev, old_sel, new_sel); } EXPORT_SYMBOL_GPL(regulator_set_voltage_time); /** * regulator_set_voltage_time_sel - get raise/fall time * @rdev: regulator source device * @old_selector: selector for starting voltage * @new_selector: selector for target voltage * * Provided with the starting and target voltage selectors, this function * returns time in microseconds required to rise or fall to this new voltage * * Drivers providing ramp_delay in regulation_constraints can use this as their * set_voltage_time_sel() operation. * * Return: ramp time in microseconds, or a negative error number if calculation failed. 
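 *
 * A minimal driver-side sketch (hypothetical driver code, "foo_ops" is an
 * illustrative name) reusing this helper directly as the callback and
 * relying on ramp_delay from the constraints or the descriptor:
 *
 *	static const struct regulator_ops foo_ops = {
 *		.list_voltage		= regulator_list_voltage_linear,
 *		.set_voltage_sel	= regulator_set_voltage_sel_regmap,
 *		.get_voltage_sel	= regulator_get_voltage_sel_regmap,
 *		.set_voltage_time_sel	= regulator_set_voltage_time_sel,
 *	};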
*/ int regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int old_selector, unsigned int new_selector) { int old_volt, new_volt; /* sanity check */ if (!rdev->desc->ops->list_voltage) return -EINVAL; old_volt = rdev->desc->ops->list_voltage(rdev, old_selector); new_volt = rdev->desc->ops->list_voltage(rdev, new_selector); if (rdev->desc->ops->set_voltage_time) return rdev->desc->ops->set_voltage_time(rdev, old_volt, new_volt); else return _regulator_set_voltage_time(rdev, old_volt, new_volt); } EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel); int regulator_sync_voltage_rdev(struct regulator_dev *rdev) { int ret; regulator_lock(rdev); if (!rdev->desc->ops->set_voltage && !rdev->desc->ops->set_voltage_sel) { ret = -EINVAL; goto out; } /* balance only, if regulator is coupled */ if (rdev->coupling_desc.n_coupled > 1) ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON); else ret = -EOPNOTSUPP; out: regulator_unlock(rdev); return ret; } /** * regulator_sync_voltage - re-apply last regulator output voltage * @regulator: regulator source * * Re-apply the last configured voltage. This is intended to be used * where some external control source the consumer is cooperating with * has caused the configured voltage to change. * * Return: 0 on success or a negative error number on failure. */ int regulator_sync_voltage(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON]; int ret, min_uV, max_uV; if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) return 0; regulator_lock(rdev); if (!rdev->desc->ops->set_voltage && !rdev->desc->ops->set_voltage_sel) { ret = -EINVAL; goto out; } /* This is only going to work if we've had a voltage configured. */ if (!voltage->min_uV && !voltage->max_uV) { ret = -EINVAL; goto out; } min_uV = voltage->min_uV; max_uV = voltage->max_uV; /* This should be a paranoia check... 
*/ ret = regulator_check_voltage(rdev, &min_uV, &max_uV); if (ret < 0) goto out; ret = regulator_check_consumers(rdev, &min_uV, &max_uV, 0); if (ret < 0) goto out; /* balance only, if regulator is coupled */ if (rdev->coupling_desc.n_coupled > 1) ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON); else ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); out: regulator_unlock(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_sync_voltage); int regulator_get_voltage_rdev(struct regulator_dev *rdev) { int sel, ret; bool bypassed; if (rdev->desc->ops->get_bypass) { ret = rdev->desc->ops->get_bypass(rdev, &bypassed); if (ret < 0) return ret; if (bypassed) { /* if bypassed the regulator must have a supply */ if (!rdev->supply) { rdev_err(rdev, "bypassed regulator has no supply!\n"); return -EPROBE_DEFER; } return regulator_get_voltage_rdev(rdev->supply->rdev); } } if (rdev->desc->ops->get_voltage_sel) { sel = rdev->desc->ops->get_voltage_sel(rdev); if (sel < 0) return sel; ret = rdev->desc->ops->list_voltage(rdev, sel); } else if (rdev->desc->ops->get_voltage) { ret = rdev->desc->ops->get_voltage(rdev); } else if (rdev->desc->ops->list_voltage) { ret = rdev->desc->ops->list_voltage(rdev, 0); } else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) { ret = rdev->desc->fixed_uV; } else if (rdev->supply) { ret = regulator_get_voltage_rdev(rdev->supply->rdev); } else if (rdev->supply_name) { return -EPROBE_DEFER; } else { return -EINVAL; } if (ret < 0) return ret; return ret - rdev->constraints->uV_offset; } EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev); /** * regulator_get_voltage - get regulator output voltage * @regulator: regulator source * * Return: Current regulator voltage in uV, or a negative error number on failure. * * NOTE: If the regulator is disabled it will return the voltage value. This * function should not be used to determine regulator state. */ int regulator_get_voltage(struct regulator *regulator) { struct ww_acquire_ctx ww_ctx; int ret; regulator_lock_dependent(regulator->rdev, &ww_ctx); ret = regulator_get_voltage_rdev(regulator->rdev); regulator_unlock_dependent(regulator->rdev, &ww_ctx); return ret; } EXPORT_SYMBOL_GPL(regulator_get_voltage); /** * regulator_set_current_limit - set regulator output current limit * @regulator: regulator source * @min_uA: Minimum supported current in uA * @max_uA: Maximum supported current in uA * * Sets current sink to the desired output current. This can be set during * any regulator state. IOW, regulator can be disabled or enabled. * * If the regulator is enabled then the current will change to the new value * immediately otherwise if the regulator is disabled the regulator will * output at the new current when enabled. * * NOTE: Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. * * Return: 0 on success or a negative error number on failure. 
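 *
 * A short consumer sketch (hypothetical code; "vbus" and the current
 * range are illustrative only):
 *
 *	ret = regulator_set_current_limit(vbus, 500000, 900000);
 *	if (ret)
 *		dev_warn(dev, "could not set current limit: %d\n", ret);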
*/ int regulator_set_current_limit(struct regulator *regulator, int min_uA, int max_uA) { struct regulator_dev *rdev = regulator->rdev; int ret; regulator_lock(rdev); /* sanity check */ if (!rdev->desc->ops->set_current_limit) { ret = -EINVAL; goto out; } /* constraints check */ ret = regulator_check_current_limit(rdev, &min_uA, &max_uA); if (ret < 0) goto out; ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA); out: regulator_unlock(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_set_current_limit); static int _regulator_get_current_limit_unlocked(struct regulator_dev *rdev) { /* sanity check */ if (!rdev->desc->ops->get_current_limit) return -EINVAL; return rdev->desc->ops->get_current_limit(rdev); } static int _regulator_get_current_limit(struct regulator_dev *rdev) { int ret; regulator_lock(rdev); ret = _regulator_get_current_limit_unlocked(rdev); regulator_unlock(rdev); return ret; } /** * regulator_get_current_limit - get regulator output current * @regulator: regulator source * * Return: Current supplied by the specified current sink in uA, * or a negative error number on failure. * * NOTE: If the regulator is disabled it will return the current value. This * function should not be used to determine regulator state. */ int regulator_get_current_limit(struct regulator *regulator) { return _regulator_get_current_limit(regulator->rdev); } EXPORT_SYMBOL_GPL(regulator_get_current_limit); /** * regulator_get_unclaimed_power_budget - get regulator unclaimed power budget * @regulator: regulator source * * Return: Unclaimed power budget of the regulator in mW. */ int regulator_get_unclaimed_power_budget(struct regulator *regulator) { return regulator->rdev->constraints->pw_budget_mW - regulator->rdev->pw_requested_mW; } EXPORT_SYMBOL_GPL(regulator_get_unclaimed_power_budget); /** * regulator_request_power_budget - request power budget on a regulator * @regulator: regulator source * @pw_req: Power requested * * Return: 0 on success or a negative error number on failure. */ int regulator_request_power_budget(struct regulator *regulator, unsigned int pw_req) { struct regulator_dev *rdev = regulator->rdev; int ret = 0, pw_tot_req; regulator_lock(rdev); if (rdev->supply) { ret = regulator_request_power_budget(rdev->supply, pw_req); if (ret < 0) goto out; } pw_tot_req = rdev->pw_requested_mW + pw_req; if (pw_tot_req > rdev->constraints->pw_budget_mW) { rdev_warn(rdev, "power requested %d mW out of budget %d mW", pw_req, rdev->constraints->pw_budget_mW - rdev->pw_requested_mW); regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT_WARN, NULL); ret = -ERANGE; goto out; } rdev->pw_requested_mW = pw_tot_req; out: regulator_unlock(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_request_power_budget); /** * regulator_free_power_budget - free power budget on a regulator * @regulator: regulator source * @pw: Power to be released. * * Return: Power budget of the regulator in mW. 
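 *
 * A brief sketch (hypothetical consumer code; "vdd" and the 1500 mW
 * figure are illustrative): budget claimed with
 * regulator_request_power_budget() should be returned with this call once
 * the consumer no longer draws that power:
 *
 *	ret = regulator_request_power_budget(vdd, 1500);
 *	if (ret)
 *		return ret;
 *	...
 *	regulator_free_power_budget(vdd, 1500);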
*/ void regulator_free_power_budget(struct regulator *regulator, unsigned int pw) { struct regulator_dev *rdev = regulator->rdev; int pw_tot_req; regulator_lock(rdev); if (rdev->supply) regulator_free_power_budget(rdev->supply, pw); pw_tot_req = rdev->pw_requested_mW - pw; if (pw_tot_req >= 0) rdev->pw_requested_mW = pw_tot_req; else rdev_warn(rdev, "too much power freed %d mW (already requested %d mW)", pw, rdev->pw_requested_mW); regulator_unlock(rdev); } EXPORT_SYMBOL_GPL(regulator_free_power_budget); /** * regulator_set_mode - set regulator operating mode * @regulator: regulator source * @mode: operating mode - one of the REGULATOR_MODE constants * * Set regulator operating mode to increase regulator efficiency or improve * regulation performance. * * NOTE: Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. * * Return: 0 on success or a negative error number on failure. */ int regulator_set_mode(struct regulator *regulator, unsigned int mode) { struct regulator_dev *rdev = regulator->rdev; int ret; int regulator_curr_mode; regulator_lock(rdev); /* sanity check */ if (!rdev->desc->ops->set_mode) { ret = -EINVAL; goto out; } /* return if the same mode is requested */ if (rdev->desc->ops->get_mode) { regulator_curr_mode = rdev->desc->ops->get_mode(rdev); if (regulator_curr_mode == mode) { ret = 0; goto out; } } /* constraints check */ ret = regulator_mode_constrain(rdev, &mode); if (ret < 0) goto out; ret = rdev->desc->ops->set_mode(rdev, mode); out: regulator_unlock(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_set_mode); static unsigned int _regulator_get_mode_unlocked(struct regulator_dev *rdev) { /* sanity check */ if (!rdev->desc->ops->get_mode) return -EINVAL; return rdev->desc->ops->get_mode(rdev); } static unsigned int _regulator_get_mode(struct regulator_dev *rdev) { int ret; regulator_lock(rdev); ret = _regulator_get_mode_unlocked(rdev); regulator_unlock(rdev); return ret; } /** * regulator_get_mode - get regulator operating mode * @regulator: regulator source * * Get the current regulator operating mode. * * Return: Current operating mode as %REGULATOR_MODE_* values, * or a negative error number on failure. */ unsigned int regulator_get_mode(struct regulator *regulator) { return _regulator_get_mode(regulator->rdev); } EXPORT_SYMBOL_GPL(regulator_get_mode); static int rdev_get_cached_err_flags(struct regulator_dev *rdev) { int ret = 0; if (rdev->use_cached_err) { spin_lock(&rdev->err_lock); ret = rdev->cached_err; spin_unlock(&rdev->err_lock); } return ret; } static int _regulator_get_error_flags(struct regulator_dev *rdev, unsigned int *flags) { int cached_flags, ret = 0; regulator_lock(rdev); cached_flags = rdev_get_cached_err_flags(rdev); if (rdev->desc->ops->get_error_flags) ret = rdev->desc->ops->get_error_flags(rdev, flags); else if (!rdev->use_cached_err) ret = -EINVAL; *flags |= cached_flags; regulator_unlock(rdev); return ret; } /** * regulator_get_error_flags - get regulator error information * @regulator: regulator source * @flags: pointer to store error flags * * Get the current regulator error information. * * Return: 0 on success or a negative error number on failure. 
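 *
 * A minimal sketch (hypothetical consumer code) polling for fault
 * conditions; the REGULATOR_ERROR_* bits are defined in
 * include/linux/regulator/consumer.h:
 *
 *	ret = regulator_get_error_flags(reg, &flags);
 *	if (!ret && (flags & REGULATOR_ERROR_UNDER_VOLTAGE))
 *		dev_warn(dev, "supply under-voltage detected\n");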
*/ int regulator_get_error_flags(struct regulator *regulator, unsigned int *flags) { return _regulator_get_error_flags(regulator->rdev, flags); } EXPORT_SYMBOL_GPL(regulator_get_error_flags); /** * regulator_set_load - set regulator load * @regulator: regulator source * @uA_load: load current * * Notifies the regulator core of a new device load. This is then used by * DRMS (if enabled by constraints) to set the most efficient regulator * operating mode for the new regulator loading. * * Consumer devices notify their supply regulator of the maximum power * they will require (can be taken from device datasheet in the power * consumption tables) when they change operational status and hence power * state. Examples of operational state changes that can affect power * consumption are :- * * o Device is opened / closed. * o Device I/O is about to begin or has just finished. * o Device is idling in between work. * * This information is also exported via sysfs to userspace. * * DRMS will sum the total requested load on the regulator and change * to the most efficient operating mode if platform constraints allow. * * NOTE: when a regulator consumer requests to have a regulator * disabled then any load that consumer requested no longer counts * toward the total requested load. If the regulator is re-enabled * then the previously requested load will start counting again. * * If a regulator is an always-on regulator then an individual consumer's * load will still be removed if that consumer is fully disabled. * * Return: 0 on success or a negative error number on failure. */ int regulator_set_load(struct regulator *regulator, int uA_load) { struct regulator_dev *rdev = regulator->rdev; int old_uA_load; int ret = 0; regulator_lock(rdev); old_uA_load = regulator->uA_load; regulator->uA_load = uA_load; if (regulator->enable_count && old_uA_load != uA_load) { ret = drms_uA_update(rdev); if (ret < 0) regulator->uA_load = old_uA_load; } regulator_unlock(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_set_load); /** * regulator_allow_bypass - allow the regulator to go into bypass mode * * @regulator: Regulator to configure * @enable: enable or disable bypass mode * * Allow the regulator to go into bypass mode if all other consumers * for the regulator also enable bypass mode and the machine * constraints allow this. Bypass mode means that the regulator is * simply passing the input directly to the output with no regulation. * * Return: 0 on success or if changing bypass is not possible, or * a negative error number on failure. 
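 *
 * A brief consumer sketch (hypothetical code; "vdd" is illustrative):
 * opt in to bypass while the consumer is idle and revert before resuming
 * normal operation; bypass only takes effect once every consumer agrees:
 *
 *	ret = regulator_allow_bypass(vdd, true);
 *	...
 *	ret = regulator_allow_bypass(vdd, false);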
*/ int regulator_allow_bypass(struct regulator *regulator, bool enable) { struct regulator_dev *rdev = regulator->rdev; const char *name = rdev_get_name(rdev); int ret = 0; if (!rdev->desc->ops->set_bypass) return 0; if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_BYPASS)) return 0; regulator_lock(rdev); if (enable && !regulator->bypass) { rdev->bypass_count++; if (rdev->bypass_count == rdev->open_count) { trace_regulator_bypass_enable(name); ret = rdev->desc->ops->set_bypass(rdev, enable); if (ret != 0) rdev->bypass_count--; else trace_regulator_bypass_enable_complete(name); } } else if (!enable && regulator->bypass) { rdev->bypass_count--; if (rdev->bypass_count != rdev->open_count) { trace_regulator_bypass_disable(name); ret = rdev->desc->ops->set_bypass(rdev, enable); if (ret != 0) rdev->bypass_count++; else trace_regulator_bypass_disable_complete(name); } } if (ret == 0) regulator->bypass = enable; regulator_unlock(rdev); return ret; } EXPORT_SYMBOL_GPL(regulator_allow_bypass); /** * regulator_register_notifier - register regulator event notifier * @regulator: regulator source * @nb: notifier block * * Register notifier block to receive regulator events. * * Return: 0 on success or a negative error number on failure. */ int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) { return blocking_notifier_chain_register(&regulator->rdev->notifier, nb); } EXPORT_SYMBOL_GPL(regulator_register_notifier); /** * regulator_unregister_notifier - unregister regulator event notifier * @regulator: regulator source * @nb: notifier block * * Unregister regulator event notifier block. * * Return: 0 on success or a negative error number on failure. */ int regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb) { return blocking_notifier_chain_unregister(&regulator->rdev->notifier, nb); } EXPORT_SYMBOL_GPL(regulator_unregister_notifier); /* notify regulator consumers and downstream regulator consumers. * Note mutex must be held by caller. */ static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ int ret = blocking_notifier_call_chain(&rdev->notifier, event, data); if (IS_REACHABLE(CONFIG_REGULATOR_NETLINK_EVENTS)) { struct device *parent = rdev->dev.parent; const char *rname = rdev_get_name(rdev); char name[32]; /* Avoid duplicate debugfs directory names */ if (parent && rname == rdev->desc->name) { snprintf(name, sizeof(name), "%s-%s", dev_name(parent), rname); rname = name; } reg_generate_netlink_event(rname, event); } return ret; } int _regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers, enum regulator_get_type get_type) { int i; int ret; for (i = 0; i < num_consumers; i++) consumers[i].consumer = NULL; for (i = 0; i < num_consumers; i++) { consumers[i].consumer = _regulator_get(dev, consumers[i].supply, get_type); if (IS_ERR(consumers[i].consumer)) { ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer), "Failed to get supply '%s'\n", consumers[i].supply); consumers[i].consumer = NULL; goto err; } if (consumers[i].init_load_uA > 0) { ret = regulator_set_load(consumers[i].consumer, consumers[i].init_load_uA); if (ret) { i++; goto err; } } } return 0; err: while (--i >= 0) regulator_put(consumers[i].consumer); return ret; } /** * regulator_bulk_get - get multiple regulator consumers * * @dev: Device to supply * @num_consumers: Number of consumers to register * @consumers: Configuration of consumers; clients are stored here. 
* * This helper function allows drivers to get several regulator * consumers in one operation. If any of the regulators cannot be * acquired then any regulators that were allocated will be freed * before returning to the caller. * * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers) { return _regulator_bulk_get(dev, num_consumers, consumers, NORMAL_GET); } EXPORT_SYMBOL_GPL(regulator_bulk_get); static void regulator_bulk_enable_async(void *data, async_cookie_t cookie) { struct regulator_bulk_data *bulk = data; bulk->ret = regulator_enable(bulk->consumer); } /** * regulator_bulk_enable - enable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * * This convenience API allows consumers to enable multiple regulator * clients in a single API call. If any consumers cannot be enabled * then any others that were enabled will be disabled again prior to * return. * * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { ASYNC_DOMAIN_EXCLUSIVE(async_domain); int i; int ret = 0; for (i = 0; i < num_consumers; i++) { async_schedule_domain(regulator_bulk_enable_async, &consumers[i], &async_domain); } async_synchronize_full_domain(&async_domain); /* If any consumer failed we need to unwind any that succeeded */ for (i = 0; i < num_consumers; i++) { if (consumers[i].ret != 0) { ret = consumers[i].ret; goto err; } } return 0; err: for (i = 0; i < num_consumers; i++) { if (consumers[i].ret < 0) pr_err("Failed to enable %s: %pe\n", consumers[i].supply, ERR_PTR(consumers[i].ret)); else regulator_disable(consumers[i].consumer); } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_enable); /** * regulator_bulk_disable - disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * * This convenience API allows consumers to disable multiple regulator * clients in a single API call. If any consumers cannot be disabled * then any others that were disabled will be enabled again prior to * return. * * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret, r; for (i = num_consumers - 1; i >= 0; --i) { ret = regulator_disable(consumers[i].consumer); if (ret != 0) goto err; } return 0; err: pr_err("Failed to disable %s: %pe\n", consumers[i].supply, ERR_PTR(ret)); for (++i; i < num_consumers; ++i) { r = regulator_enable(consumers[i].consumer); if (r != 0) pr_err("Failed to re-enable %s: %pe\n", consumers[i].supply, ERR_PTR(r)); } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_disable); /** * regulator_bulk_force_disable - force disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * * This convenience API allows consumers to forcibly disable multiple regulator * clients in a single API call. * NOTE: This should be used for situations when device damage will * likely occur if the regulators are not disabled (e.g. over temp). * Although regulator_force_disable function call for some consumers can * return error numbers, the function is called for all consumers. * * Return: 0 on success or a negative error number on failure. 
*/ int regulator_bulk_force_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret = 0; for (i = 0; i < num_consumers; i++) { consumers[i].ret = regulator_force_disable(consumers[i].consumer); /* Store first error for reporting */ if (consumers[i].ret && !ret) ret = consumers[i].ret; } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_force_disable); /** * regulator_bulk_free - free multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * * This convenience API allows consumers to free multiple regulator * clients in a single API call. */ void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers) { int i; for (i = 0; i < num_consumers; i++) { regulator_put(consumers[i].consumer); consumers[i].consumer = NULL; } } EXPORT_SYMBOL_GPL(regulator_bulk_free); /** * regulator_handle_critical - Handle events for system-critical regulators. * @rdev: The regulator device. * @event: The event being handled. * * This function handles critical events such as under-voltage, over-current, * and unknown errors for regulators deemed system-critical. On detecting such * events, it triggers a hardware protection shutdown with a defined timeout. */ static void regulator_handle_critical(struct regulator_dev *rdev, unsigned long event) { const char *reason = NULL; if (!rdev->constraints->system_critical) return; switch (event) { case REGULATOR_EVENT_UNDER_VOLTAGE: reason = "System critical regulator: voltage drop detected"; break; case REGULATOR_EVENT_OVER_CURRENT: reason = "System critical regulator: over-current detected"; break; case REGULATOR_EVENT_FAIL: reason = "System critical regulator: unknown error"; } if (!reason) return; hw_protection_shutdown(reason, rdev->constraints->uv_less_critical_window_ms); } /** * regulator_notifier_call_chain - call regulator event notifier * @rdev: regulator source * @event: notifier block * @data: callback-specific data. * * Called by regulator drivers to notify clients a regulator event has * occurred. * * Return: %NOTIFY_DONE. */ int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { regulator_handle_critical(rdev, event); _notifier_call_chain(rdev, event, data); return NOTIFY_DONE; } EXPORT_SYMBOL_GPL(regulator_notifier_call_chain); /** * regulator_mode_to_status - convert a regulator mode into a status * * @mode: Mode to convert * * Convert a regulator mode into a status. * * Return: %REGULATOR_STATUS_* value corresponding to given mode. 
*/ int regulator_mode_to_status(unsigned int mode) { switch (mode) { case REGULATOR_MODE_FAST: return REGULATOR_STATUS_FAST; case REGULATOR_MODE_NORMAL: return REGULATOR_STATUS_NORMAL; case REGULATOR_MODE_IDLE: return REGULATOR_STATUS_IDLE; case REGULATOR_MODE_STANDBY: return REGULATOR_STATUS_STANDBY; default: return REGULATOR_STATUS_UNDEFINED; } } EXPORT_SYMBOL_GPL(regulator_mode_to_status); static struct attribute *regulator_dev_attrs[] = { &dev_attr_name.attr, &dev_attr_num_users.attr, &dev_attr_type.attr, &dev_attr_microvolts.attr, &dev_attr_microamps.attr, &dev_attr_opmode.attr, &dev_attr_state.attr, &dev_attr_status.attr, &dev_attr_bypass.attr, &dev_attr_requested_microamps.attr, &dev_attr_min_microvolts.attr, &dev_attr_max_microvolts.attr, &dev_attr_min_microamps.attr, &dev_attr_max_microamps.attr, &dev_attr_under_voltage.attr, &dev_attr_over_current.attr, &dev_attr_regulation_out.attr, &dev_attr_fail.attr, &dev_attr_over_temp.attr, &dev_attr_under_voltage_warn.attr, &dev_attr_over_current_warn.attr, &dev_attr_over_voltage_warn.attr, &dev_attr_over_temp_warn.attr, &dev_attr_suspend_standby_state.attr, &dev_attr_suspend_mem_state.attr, &dev_attr_suspend_disk_state.attr, &dev_attr_suspend_standby_microvolts.attr, &dev_attr_suspend_mem_microvolts.attr, &dev_attr_suspend_disk_microvolts.attr, &dev_attr_suspend_standby_mode.attr, &dev_attr_suspend_mem_mode.attr, &dev_attr_suspend_disk_mode.attr, &dev_attr_power_budget_milliwatt.attr, &dev_attr_power_requested_milliwatt.attr, NULL }; /* * To avoid cluttering sysfs (and memory) with useless state, only * create attributes that can be meaningfully displayed. */ static umode_t regulator_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); struct regulator_dev *rdev = dev_to_rdev(dev); const struct regulator_ops *ops = rdev->desc->ops; umode_t mode = attr->mode; /* these three are always present */ if (attr == &dev_attr_name.attr || attr == &dev_attr_num_users.attr || attr == &dev_attr_type.attr) return mode; /* some attributes need specific methods to be displayed */ if (attr == &dev_attr_microvolts.attr) { if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1)) return mode; return 0; } if (attr == &dev_attr_microamps.attr) return ops->get_current_limit ? mode : 0; if (attr == &dev_attr_opmode.attr) return ops->get_mode ? mode : 0; if (attr == &dev_attr_state.attr) return (rdev->ena_pin || ops->is_enabled) ? mode : 0; if (attr == &dev_attr_status.attr) return ops->get_status ? mode : 0; if (attr == &dev_attr_bypass.attr) return ops->get_bypass ? mode : 0; if (attr == &dev_attr_under_voltage.attr || attr == &dev_attr_over_current.attr || attr == &dev_attr_regulation_out.attr || attr == &dev_attr_fail.attr || attr == &dev_attr_over_temp.attr || attr == &dev_attr_under_voltage_warn.attr || attr == &dev_attr_over_current_warn.attr || attr == &dev_attr_over_voltage_warn.attr || attr == &dev_attr_over_temp_warn.attr) return ops->get_error_flags ? mode : 0; /* constraints need specific supporting methods */ if (attr == &dev_attr_min_microvolts.attr || attr == &dev_attr_max_microvolts.attr) return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0; if (attr == &dev_attr_min_microamps.attr || attr == &dev_attr_max_microamps.attr) return ops->set_current_limit ? 
mode : 0; if (attr == &dev_attr_suspend_standby_state.attr || attr == &dev_attr_suspend_mem_state.attr || attr == &dev_attr_suspend_disk_state.attr) return mode; if (attr == &dev_attr_suspend_standby_microvolts.attr || attr == &dev_attr_suspend_mem_microvolts.attr || attr == &dev_attr_suspend_disk_microvolts.attr) return ops->set_suspend_voltage ? mode : 0; if (attr == &dev_attr_suspend_standby_mode.attr || attr == &dev_attr_suspend_mem_mode.attr || attr == &dev_attr_suspend_disk_mode.attr) return ops->set_suspend_mode ? mode : 0; if (attr == &dev_attr_power_budget_milliwatt.attr || attr == &dev_attr_power_requested_milliwatt.attr) return rdev->constraints->pw_budget_mW != INT_MAX ? mode : 0; return mode; } static const struct attribute_group regulator_dev_group = { .attrs = regulator_dev_attrs, .is_visible = regulator_attr_is_visible, }; static const struct attribute_group *regulator_dev_groups[] = { &regulator_dev_group, NULL }; static void regulator_dev_release(struct device *dev) { struct regulator_dev *rdev = dev_get_drvdata(dev); debugfs_remove_recursive(rdev->debugfs); kfree(rdev->constraints); of_node_put(rdev->dev.of_node); kfree(rdev); } static void rdev_init_debugfs(struct regulator_dev *rdev) { struct device *parent = rdev->dev.parent; const char *rname = rdev_get_name(rdev); char name[NAME_MAX]; /* Avoid duplicate debugfs directory names */ if (parent && rname == rdev->desc->name) { snprintf(name, sizeof(name), "%s-%s", dev_name(parent), rname); rname = name; } rdev->debugfs = debugfs_create_dir(rname, debugfs_root); if (IS_ERR(rdev->debugfs)) rdev_dbg(rdev, "Failed to create debugfs directory\n"); debugfs_create_u32("use_count", 0444, rdev->debugfs, &rdev->use_count); debugfs_create_u32("open_count", 0444, rdev->debugfs, &rdev->open_count); debugfs_create_u32("bypass_count", 0444, rdev->debugfs, &rdev->bypass_count); } static int regulator_register_resolve_supply(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); if (regulator_resolve_supply(rdev)) rdev_dbg(rdev, "unable to resolve supply\n"); return 0; } int regulator_coupler_register(struct regulator_coupler *coupler) { mutex_lock(&regulator_list_mutex); list_add_tail(&coupler->list, &regulator_coupler_list); mutex_unlock(&regulator_list_mutex); return 0; } static struct regulator_coupler * regulator_find_coupler(struct regulator_dev *rdev) { struct regulator_coupler *coupler; int err; /* * Note that regulators are appended to the list and the generic * coupler is registered first, hence it will be attached at last * if nobody cared. 
*/ list_for_each_entry_reverse(coupler, &regulator_coupler_list, list) { err = coupler->attach_regulator(coupler, rdev); if (!err) { if (!coupler->balance_voltage && rdev->coupling_desc.n_coupled > 2) goto err_unsupported; return coupler; } if (err < 0) return ERR_PTR(err); if (err == 1) continue; break; } return ERR_PTR(-EINVAL); err_unsupported: if (coupler->detach_regulator) coupler->detach_regulator(coupler, rdev); rdev_err(rdev, "Voltage balancing for multiple regulator couples is unimplemented\n"); return ERR_PTR(-EPERM); } static void regulator_resolve_coupling(struct regulator_dev *rdev) { struct regulator_coupler *coupler = rdev->coupling_desc.coupler; struct coupling_desc *c_desc = &rdev->coupling_desc; int n_coupled = c_desc->n_coupled; struct regulator_dev *c_rdev; int i; for (i = 1; i < n_coupled; i++) { /* already resolved */ if (c_desc->coupled_rdevs[i]) continue; c_rdev = of_parse_coupled_regulator(rdev, i - 1); if (!c_rdev) continue; if (c_rdev->coupling_desc.coupler != coupler) { rdev_err(rdev, "coupler mismatch with %s\n", rdev_get_name(c_rdev)); return; } c_desc->coupled_rdevs[i] = c_rdev; c_desc->n_resolved++; regulator_resolve_coupling(c_rdev); } } static void regulator_remove_coupling(struct regulator_dev *rdev) { struct regulator_coupler *coupler = rdev->coupling_desc.coupler; struct coupling_desc *__c_desc, *c_desc = &rdev->coupling_desc; struct regulator_dev *__c_rdev, *c_rdev; unsigned int __n_coupled, n_coupled; int i, k; int err; n_coupled = c_desc->n_coupled; for (i = 1; i < n_coupled; i++) { c_rdev = c_desc->coupled_rdevs[i]; if (!c_rdev) continue; regulator_lock(c_rdev); __c_desc = &c_rdev->coupling_desc; __n_coupled = __c_desc->n_coupled; for (k = 1; k < __n_coupled; k++) { __c_rdev = __c_desc->coupled_rdevs[k]; if (__c_rdev == rdev) { __c_desc->coupled_rdevs[k] = NULL; __c_desc->n_resolved--; break; } } regulator_unlock(c_rdev); c_desc->coupled_rdevs[i] = NULL; c_desc->n_resolved--; } if (coupler && coupler->detach_regulator) { err = coupler->detach_regulator(coupler, rdev); if (err) rdev_err(rdev, "failed to detach from coupler: %pe\n", ERR_PTR(err)); } kfree(rdev->coupling_desc.coupled_rdevs); rdev->coupling_desc.coupled_rdevs = NULL; } static int regulator_init_coupling(struct regulator_dev *rdev) { struct regulator_dev **coupled; int err, n_phandles; if (!IS_ENABLED(CONFIG_OF)) n_phandles = 0; else n_phandles = of_get_n_coupled(rdev); coupled = kcalloc(n_phandles + 1, sizeof(*coupled), GFP_KERNEL); if (!coupled) return -ENOMEM; rdev->coupling_desc.coupled_rdevs = coupled; /* * Every regulator should always have coupling descriptor filled with * at least pointer to itself. 
*/ rdev->coupling_desc.coupled_rdevs[0] = rdev; rdev->coupling_desc.n_coupled = n_phandles + 1; rdev->coupling_desc.n_resolved++; /* regulator isn't coupled */ if (n_phandles == 0) return 0; if (!of_check_coupling_data(rdev)) return -EPERM; mutex_lock(&regulator_list_mutex); rdev->coupling_desc.coupler = regulator_find_coupler(rdev); mutex_unlock(&regulator_list_mutex); if (IS_ERR(rdev->coupling_desc.coupler)) { err = PTR_ERR(rdev->coupling_desc.coupler); rdev_err(rdev, "failed to get coupler: %pe\n", ERR_PTR(err)); return err; } return 0; } static int generic_coupler_attach(struct regulator_coupler *coupler, struct regulator_dev *rdev) { if (rdev->coupling_desc.n_coupled > 2) { rdev_err(rdev, "Voltage balancing for multiple regulator couples is unimplemented\n"); return -EPERM; } if (!rdev->constraints->always_on) { rdev_err(rdev, "Coupling of a non always-on regulator is unimplemented\n"); return -ENOTSUPP; } return 0; } static struct regulator_coupler generic_regulator_coupler = { .attach_regulator = generic_coupler_attach, }; /** * regulator_register - register regulator * @dev: the device that drive the regulator * @regulator_desc: regulator to register * @cfg: runtime configuration for regulator * * Called by regulator drivers to register a regulator. * * Return: Pointer to a valid &struct regulator_dev on success or * an ERR_PTR() encoded negative error number on failure. */ struct regulator_dev * regulator_register(struct device *dev, const struct regulator_desc *regulator_desc, const struct regulator_config *cfg) { const struct regulator_init_data *init_data; struct regulator_config *config = NULL; static atomic_t regulator_no = ATOMIC_INIT(-1); struct regulator_dev *rdev; bool dangling_cfg_gpiod = false; bool dangling_of_gpiod = false; int ret, i; bool resolved_early = false; if (cfg == NULL) return ERR_PTR(-EINVAL); if (cfg->ena_gpiod) dangling_cfg_gpiod = true; if (regulator_desc == NULL) { ret = -EINVAL; goto rinse; } WARN_ON(!dev || !cfg->dev); if (regulator_desc->name == NULL || regulator_desc->ops == NULL) { ret = -EINVAL; goto rinse; } if (regulator_desc->type != REGULATOR_VOLTAGE && regulator_desc->type != REGULATOR_CURRENT) { ret = -EINVAL; goto rinse; } /* Only one of each should be implemented */ WARN_ON(regulator_desc->ops->get_voltage && regulator_desc->ops->get_voltage_sel); WARN_ON(regulator_desc->ops->set_voltage && regulator_desc->ops->set_voltage_sel); /* If we're using selectors we must implement list_voltage. */ if (regulator_desc->ops->get_voltage_sel && !regulator_desc->ops->list_voltage) { ret = -EINVAL; goto rinse; } if (regulator_desc->ops->set_voltage_sel && !regulator_desc->ops->list_voltage) { ret = -EINVAL; goto rinse; } rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL); if (rdev == NULL) { ret = -ENOMEM; goto rinse; } device_initialize(&rdev->dev); dev_set_drvdata(&rdev->dev, rdev); rdev->dev.class = &regulator_class; spin_lock_init(&rdev->err_lock); /* * Duplicate the config so the driver could override it after * parsing init data. */ config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); if (config == NULL) { ret = -ENOMEM; goto clean; } if (config->init_data) { /* * Providing of_match means the framework is expected to parse * DT to get the init_data. This would conflict with provided * init_data, if set. Warn if it happens. 
*/ if (regulator_desc->of_match) dev_warn(dev, "Using provided init data - OF match ignored\n"); init_data = config->init_data; rdev->dev.of_node = of_node_get(config->of_node); } else { init_data = regulator_of_get_init_data(dev, regulator_desc, config, &rdev->dev.of_node); /* * Sometimes not all resources are probed already so we need to * take that into account. This happens most the time if the * ena_gpiod comes from a gpio extender or something else. */ if (PTR_ERR(init_data) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto clean; } /* * We need to keep track of any GPIO descriptor coming from the * device tree until we have handled it over to the core. If the * config that was passed in to this function DOES NOT contain a * descriptor, and the config after this call DOES contain a * descriptor, we definitely got one from parsing the device * tree. */ if (!cfg->ena_gpiod && config->ena_gpiod) dangling_of_gpiod = true; } ww_mutex_init(&rdev->mutex, &regulator_ww_class); rdev->reg_data = config->driver_data; rdev->owner = regulator_desc->owner; rdev->desc = regulator_desc; if (config->regmap) rdev->regmap = config->regmap; else if (dev_get_regmap(dev, NULL)) rdev->regmap = dev_get_regmap(dev, NULL); else if (dev->parent) rdev->regmap = dev_get_regmap(dev->parent, NULL); INIT_LIST_HEAD(&rdev->consumer_list); INIT_LIST_HEAD(&rdev->list); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work); if (init_data && init_data->supply_regulator) rdev->supply_name = init_data->supply_regulator; else if (regulator_desc->supply_name) rdev->supply_name = regulator_desc->supply_name; /* register with sysfs */ rdev->dev.parent = config->dev; dev_set_name(&rdev->dev, "regulator.%lu", (unsigned long) atomic_inc_return(&regulator_no)); /* set regulator constraints */ if (init_data) rdev->constraints = kmemdup(&init_data->constraints, sizeof(*rdev->constraints), GFP_KERNEL); else rdev->constraints = kzalloc(sizeof(*rdev->constraints), GFP_KERNEL); if (!rdev->constraints) { ret = -ENOMEM; goto wash; } if (regulator_desc->init_cb) { ret = regulator_desc->init_cb(rdev, config); if (ret < 0) goto wash; } if ((rdev->supply_name && !rdev->supply) && (rdev->constraints->always_on || rdev->constraints->boot_on)) { ret = regulator_resolve_supply(rdev); if (ret) rdev_dbg(rdev, "unable to resolve supply early: %pe\n", ERR_PTR(ret)); resolved_early = true; } if (config->ena_gpiod) { ret = regulator_ena_gpio_request(rdev, config); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO: %pe\n", ERR_PTR(ret)); goto wash; } /* The regulator core took over the GPIO descriptor */ dangling_cfg_gpiod = false; dangling_of_gpiod = false; } ret = set_machine_constraints(rdev); if (ret == -EPROBE_DEFER && !resolved_early) { /* Regulator might be in bypass mode and so needs its supply * to set the constraints */ /* FIXME: this currently triggers a chicken-and-egg problem * when creating -SUPPLY symlink in sysfs to a regulator * that is just being created */ rdev_dbg(rdev, "will resolve supply early: %s\n", rdev->supply_name); ret = regulator_resolve_supply(rdev); if (!ret) ret = set_machine_constraints(rdev); else rdev_dbg(rdev, "unable to resolve supply early: %pe\n", ERR_PTR(ret)); } if (ret < 0) goto wash; ret = regulator_init_coupling(rdev); if (ret < 0) goto wash; /* add consumers devices */ if (init_data) { for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, 
init_data->consumer_supplies[i].supply); if (ret < 0) { dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } } if (!rdev->desc->ops->get_voltage && !rdev->desc->ops->list_voltage && !rdev->desc->fixed_uV) rdev->is_switch = true; ret = device_add(&rdev->dev); if (ret != 0) goto unset_supplies; rdev_init_debugfs(rdev); /* try to resolve regulators coupling since a new one was registered */ mutex_lock(&regulator_list_mutex); regulator_resolve_coupling(rdev); mutex_unlock(&regulator_list_mutex); /* try to resolve regulators supply since a new one was registered */ class_for_each_device(&regulator_class, NULL, NULL, regulator_register_resolve_supply); kfree(config); return rdev; unset_supplies: mutex_lock(&regulator_list_mutex); unset_regulator_supplies(rdev); regulator_remove_coupling(rdev); mutex_unlock(&regulator_list_mutex); wash: regulator_put(rdev->supply); kfree(rdev->coupling_desc.coupled_rdevs); mutex_lock(&regulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(&regulator_list_mutex); clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); kfree(config); put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(regulator_register); /** * regulator_unregister - unregister regulator * @rdev: regulator to unregister * * Called by regulator drivers to unregister a regulator. */ void regulator_unregister(struct regulator_dev *rdev) { if (rdev == NULL) return; if (rdev->supply) { while (rdev->use_count--) regulator_disable(rdev->supply); regulator_put(rdev->supply); } flush_work(&rdev->disable_work.work); mutex_lock(&regulator_list_mutex); WARN_ON(rdev->open_count); regulator_remove_coupling(rdev); unset_regulator_supplies(rdev); list_del(&rdev->list); regulator_ena_gpio_free(rdev); device_unregister(&rdev->dev); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_unregister); #ifdef CONFIG_SUSPEND /** * regulator_suspend - prepare regulators for system wide suspend * @dev: ``&struct device`` pointer that is passed to _regulator_suspend() * * Configure each regulator with it's suspend operating parameters for state. * * Return: 0 on success or a negative error number on failure. 
*/ static int regulator_suspend(struct device *dev) { struct regulator_dev *rdev = dev_to_rdev(dev); suspend_state_t state = pm_suspend_target_state; int ret; const struct regulator_state *rstate; rstate = regulator_get_suspend_state_check(rdev, state); if (!rstate) return 0; regulator_lock(rdev); ret = __suspend_set_state(rdev, rstate); regulator_unlock(rdev); return ret; } static int regulator_resume(struct device *dev) { suspend_state_t state = pm_suspend_target_state; struct regulator_dev *rdev = dev_to_rdev(dev); struct regulator_state *rstate; int ret = 0; rstate = regulator_get_suspend_state(rdev, state); if (rstate == NULL) return 0; /* Avoid grabbing the lock if we don't need to */ if (!rdev->desc->ops->resume) return 0; regulator_lock(rdev); if (rstate->enabled == ENABLE_IN_SUSPEND || rstate->enabled == DISABLE_IN_SUSPEND) ret = rdev->desc->ops->resume(rdev); regulator_unlock(rdev); return ret; } #else /* !CONFIG_SUSPEND */ #define regulator_suspend NULL #define regulator_resume NULL #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_PM static const struct dev_pm_ops __maybe_unused regulator_pm_ops = { .suspend = regulator_suspend, .resume = regulator_resume, }; #endif const struct class regulator_class = { .name = "regulator", .dev_release = regulator_dev_release, .dev_groups = regulator_dev_groups, #ifdef CONFIG_PM .pm = &regulator_pm_ops, #endif }; /** * regulator_has_full_constraints - the system has fully specified constraints * * Calling this function will cause the regulator API to disable all * regulators which have a zero use count and don't have an always_on * constraint in a late_initcall. * * The intention is that this will become the default behaviour in a * future kernel release so users are encouraged to use this facility * now. */ void regulator_has_full_constraints(void) { has_full_constraints = 1; } EXPORT_SYMBOL_GPL(regulator_has_full_constraints); /** * rdev_get_drvdata - get rdev regulator driver data * @rdev: regulator * * Get rdev regulator driver private data. This call can be used in the * regulator driver context. * * Return: Pointer to regulator driver private data. */ void *rdev_get_drvdata(struct regulator_dev *rdev) { return rdev->reg_data; } EXPORT_SYMBOL_GPL(rdev_get_drvdata); /** * regulator_get_drvdata - get regulator driver data * @regulator: regulator * * Get regulator driver private data. This call can be used in the consumer * driver context when non API regulator specific functions need to be called. * * Return: Pointer to regulator driver private data. */ void *regulator_get_drvdata(struct regulator *regulator) { return regulator->rdev->reg_data; } EXPORT_SYMBOL_GPL(regulator_get_drvdata); /** * regulator_set_drvdata - set regulator driver data * @regulator: regulator * @data: data */ void regulator_set_drvdata(struct regulator *regulator, void *data) { regulator->rdev->reg_data = data; } EXPORT_SYMBOL_GPL(regulator_set_drvdata); /** * rdev_get_id - get regulator ID * @rdev: regulator * * Return: Regulator ID for @rdev. 
*/ int rdev_get_id(struct regulator_dev *rdev) { return rdev->desc->id; } EXPORT_SYMBOL_GPL(rdev_get_id); struct device *rdev_get_dev(struct regulator_dev *rdev) { return &rdev->dev; } EXPORT_SYMBOL_GPL(rdev_get_dev); struct regmap *rdev_get_regmap(struct regulator_dev *rdev) { return rdev->regmap; } EXPORT_SYMBOL_GPL(rdev_get_regmap); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data) { return reg_init_data->driver_data; } EXPORT_SYMBOL_GPL(regulator_get_init_drvdata); #ifdef CONFIG_DEBUG_FS static int supply_map_show(struct seq_file *sf, void *data) { struct regulator_map *map; list_for_each_entry(map, &regulator_map_list, list) { seq_printf(sf, "%s -> %s.%s\n", rdev_get_name(map->regulator), map->dev_name, map->supply); } return 0; } DEFINE_SHOW_ATTRIBUTE(supply_map); struct summary_data { struct seq_file *s; struct regulator_dev *parent; int level; }; static void regulator_summary_show_subtree(struct seq_file *s, struct regulator_dev *rdev, int level); static int regulator_summary_show_children(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); struct summary_data *summary_data = data; if (rdev->supply && rdev->supply->rdev == summary_data->parent) regulator_summary_show_subtree(summary_data->s, rdev, summary_data->level + 1); return 0; } static void regulator_summary_show_subtree(struct seq_file *s, struct regulator_dev *rdev, int level) { struct regulation_constraints *c; struct regulator *consumer; struct summary_data summary_data; unsigned int opmode; if (!rdev) return; opmode = _regulator_get_mode_unlocked(rdev); seq_printf(s, "%*s%-*s %3d %4d %6d %7s ", level * 3 + 1, "", 30 - level * 3, rdev_get_name(rdev), rdev->use_count, rdev->open_count, rdev->bypass_count, regulator_opmode_to_str(opmode)); seq_printf(s, "%5dmV ", regulator_get_voltage_rdev(rdev) / 1000); seq_printf(s, "%5dmA ", _regulator_get_current_limit_unlocked(rdev) / 1000); c = rdev->constraints; if (c) { switch (rdev->desc->type) { case REGULATOR_VOLTAGE: seq_printf(s, "%5dmV %5dmV ", c->min_uV / 1000, c->max_uV / 1000); break; case REGULATOR_CURRENT: seq_printf(s, "%5dmA %5dmA ", c->min_uA / 1000, c->max_uA / 1000); break; } } seq_puts(s, "\n"); list_for_each_entry(consumer, &rdev->consumer_list, list) { if (consumer->dev && consumer->dev->class == &regulator_class) continue; seq_printf(s, "%*s%-*s ", (level + 1) * 3 + 1, "", 30 - (level + 1) * 3, consumer->supply_name ? consumer->supply_name : consumer->dev ? dev_name(consumer->dev) : "deviceless"); switch (rdev->desc->type) { case REGULATOR_VOLTAGE: seq_printf(s, "%3d %33dmA%c%5dmV %5dmV", consumer->enable_count, consumer->uA_load / 1000, consumer->uA_load && !consumer->enable_count ? 
'*' : ' ', consumer->voltage[PM_SUSPEND_ON].min_uV / 1000, consumer->voltage[PM_SUSPEND_ON].max_uV / 1000); break; case REGULATOR_CURRENT: break; } seq_puts(s, "\n"); } summary_data.s = s; summary_data.level = level; summary_data.parent = rdev; class_for_each_device(&regulator_class, NULL, &summary_data, regulator_summary_show_children); } struct summary_lock_data { struct ww_acquire_ctx *ww_ctx; struct regulator_dev **new_contended_rdev; struct regulator_dev **old_contended_rdev; }; static int regulator_summary_lock_one(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); struct summary_lock_data *lock_data = data; int ret = 0; if (rdev != *lock_data->old_contended_rdev) { ret = regulator_lock_nested(rdev, lock_data->ww_ctx); if (ret == -EDEADLK) *lock_data->new_contended_rdev = rdev; else WARN_ON_ONCE(ret); } else { *lock_data->old_contended_rdev = NULL; } return ret; } static int regulator_summary_unlock_one(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); struct summary_lock_data *lock_data = data; if (lock_data) { if (rdev == *lock_data->new_contended_rdev) return -EDEADLK; } regulator_unlock(rdev); return 0; } static int regulator_summary_lock_all(struct ww_acquire_ctx *ww_ctx, struct regulator_dev **new_contended_rdev, struct regulator_dev **old_contended_rdev) { struct summary_lock_data lock_data; int ret; lock_data.ww_ctx = ww_ctx; lock_data.new_contended_rdev = new_contended_rdev; lock_data.old_contended_rdev = old_contended_rdev; ret = class_for_each_device(&regulator_class, NULL, &lock_data, regulator_summary_lock_one); if (ret) class_for_each_device(&regulator_class, NULL, &lock_data, regulator_summary_unlock_one); return ret; } static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx) { struct regulator_dev *new_contended_rdev = NULL; struct regulator_dev *old_contended_rdev = NULL; int err; mutex_lock(&regulator_list_mutex); ww_acquire_init(ww_ctx, &regulator_ww_class); do { if (new_contended_rdev) { ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx); old_contended_rdev = new_contended_rdev; old_contended_rdev->ref_cnt++; old_contended_rdev->mutex_owner = current; } err = regulator_summary_lock_all(ww_ctx, &new_contended_rdev, &old_contended_rdev); if (old_contended_rdev) regulator_unlock(old_contended_rdev); } while (err == -EDEADLK); ww_acquire_done(ww_ctx); } static void regulator_summary_unlock(struct ww_acquire_ctx *ww_ctx) { class_for_each_device(&regulator_class, NULL, NULL, regulator_summary_unlock_one); ww_acquire_fini(ww_ctx); mutex_unlock(&regulator_list_mutex); } static int regulator_summary_show_roots(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); struct seq_file *s = data; if (!rdev->supply) regulator_summary_show_subtree(s, rdev, 0); return 0; } static int regulator_summary_show(struct seq_file *s, void *data) { struct ww_acquire_ctx ww_ctx; seq_puts(s, " regulator use open bypass opmode voltage current min max\n"); seq_puts(s, "---------------------------------------------------------------------------------------\n"); regulator_summary_lock(&ww_ctx); class_for_each_device(&regulator_class, NULL, s, regulator_summary_show_roots); regulator_summary_unlock(&ww_ctx); return 0; } DEFINE_SHOW_ATTRIBUTE(regulator_summary); #endif /* CONFIG_DEBUG_FS */ static int __init regulator_init(void) { int ret; ret = class_register(&regulator_class); debugfs_root = debugfs_create_dir("regulator", NULL); if (IS_ERR(debugfs_root)) pr_debug("regulator: Failed to create 
debugfs directory\n"); #ifdef CONFIG_DEBUG_FS debugfs_create_file("supply_map", 0444, debugfs_root, NULL, &supply_map_fops); debugfs_create_file("regulator_summary", 0444, debugfs_root, NULL, &regulator_summary_fops); #endif regulator_dummy_init(); regulator_coupler_register(&generic_regulator_coupler); return ret; } /* init early to allow our consumers to complete system booting */ core_initcall(regulator_init); static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); struct regulation_constraints *c = rdev->constraints; int ret; if (c && c->always_on) return 0; if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) return 0; regulator_lock(rdev); if (rdev->use_count) goto unlock; /* If reading the status failed, assume that it's off. */ if (_regulator_is_enabled(rdev) <= 0) goto unlock; if (have_full_constraints()) { /* We log since this may kill the system if it goes * wrong. */ rdev_info(rdev, "disabling\n"); ret = _regulator_do_disable(rdev); if (ret != 0) rdev_err(rdev, "couldn't disable: %pe\n", ERR_PTR(ret)); } else { /* The intention is that in future we will * assume that full constraints are provided * so warn even if we aren't going to do * anything here. */ rdev_warn(rdev, "incomplete constraints, leaving on\n"); } unlock: regulator_unlock(rdev); return 0; } static bool regulator_ignore_unused; static int __init regulator_ignore_unused_setup(char *__unused) { regulator_ignore_unused = true; return 1; } __setup("regulator_ignore_unused", regulator_ignore_unused_setup); static void regulator_init_complete_work_function(struct work_struct *work) { /* * Regulators may had failed to resolve their input supplies * when were registered, either because the input supply was * not registered yet or because its parent device was not * bound yet. So attempt to resolve the input supplies for * pending regulators before trying to disable unused ones. */ class_for_each_device(&regulator_class, NULL, NULL, regulator_register_resolve_supply); /* * For debugging purposes, it may be useful to prevent unused * regulators from being disabled. */ if (regulator_ignore_unused) { pr_warn("regulator: Not disabling unused regulators\n"); return; } /* If we have a full configuration then disable any regulators * we have permission to change the status for and which are * not in use or always_on. This is effectively the default * for DT and ACPI as they have full constraints. */ class_for_each_device(&regulator_class, NULL, NULL, regulator_late_cleanup); } static DECLARE_DELAYED_WORK(regulator_init_complete_work, regulator_init_complete_work_function); static int __init regulator_init_complete(void) { /* * Since DT doesn't provide an idiomatic mechanism for * enabling full constraints and since it's much more natural * with DT to provide them just assume that a DT enabled * system has full constraints. */ if (of_have_populated_dt()) has_full_constraints = true; /* * We punt completion for an arbitrary amount of time since * systems like distros will load many drivers from userspace * so consumers might not always be ready yet, this is * particularly an issue with laptops where this might bounce * the display off then on. Ideally we'd get a notification * from userspace when this happens but we don't so just wait * a bit and hope we waited long enough. It'd be better if * we'd only do this on systems that need it, and a kernel * command line option might be useful. 
*/ schedule_delayed_work(&regulator_init_complete_work, msecs_to_jiffies(30000)); return 0; } late_initcall_sync(regulator_init_complete);
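/*
 * Illustrative sketch (not part of the original source): a minimal example of
 * how a consumer driver might use the regulator consumer APIs documented
 * above -- regulator_bulk_get(), regulator_set_load(), regulator_bulk_enable()/
 * regulator_bulk_disable() and regulator_register_notifier(). The driver name
 * "foo-consumer", the supply names "vdd"/"vddio", the 50 mA load figure and
 * the notifier callback are hypothetical assumptions made purely for
 * illustration; only the regulator API calls themselves come from the code
 * above.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data foo_supplies[] = {
	{ .supply = "vdd" },
	{ .supply = "vddio" },
};

static int foo_regulator_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	/* React to events such as REGULATOR_EVENT_UNDER_VOLTAGE here. */
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_regulator_event,
};

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Get both supplies in one call; on failure nothing is left held. */
	ret = regulator_bulk_get(&pdev->dev, ARRAY_SIZE(foo_supplies),
				 foo_supplies);
	if (ret)
		return ret;

	/* Tell DRMS how much current the "vdd" rail is expected to draw. */
	ret = regulator_set_load(foo_supplies[0].consumer, 50000); /* 50 mA */
	if (ret)
		goto err_free;

	/* Listen for error events (over-current, under-voltage, ...) on vdd. */
	ret = regulator_register_notifier(foo_supplies[0].consumer, &foo_nb);
	if (ret)
		goto err_free;

	/* Enable all supplies; the helper unwinds any that did enable on error. */
	ret = regulator_bulk_enable(ARRAY_SIZE(foo_supplies), foo_supplies);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	regulator_unregister_notifier(foo_supplies[0].consumer, &foo_nb);
err_free:
	regulator_bulk_free(ARRAY_SIZE(foo_supplies), foo_supplies);
	return ret;
}

static void foo_remove(struct platform_device *pdev)
{
	regulator_bulk_disable(ARRAY_SIZE(foo_supplies), foo_supplies);
	regulator_unregister_notifier(foo_supplies[0].consumer, &foo_nb);
	regulator_bulk_free(ARRAY_SIZE(foo_supplies), foo_supplies);
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.remove = foo_remove,
	.driver = {
		.name = "foo-consumer",
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");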
// SPDX-License-Identifier: GPL-2.0+ /* * MaxLinear/Exar USB to Serial driver * * Copyright (c) 2020 Manivannan Sadhasivam <mani@kernel.org> * Copyright (c) 2021 Johan Hovold <johan@kernel.org> * * Based on the initial driver written by Patong Yang: * * https://lore.kernel.org/r/20180404070634.nhspvmxcjwfgjkcv@advantechmxl-desktop * * Copyright (c) 2018 Patong Yang <patong.mxl@gmail.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/serial.h> struct xr_txrx_clk_mask { u16 tx; u16 rx0; u16 rx1; }; #define XR_INT_OSC_HZ 48000000U #define XR21V141X_MIN_SPEED 46U #define XR21V141X_MAX_SPEED XR_INT_OSC_HZ /* XR21V141X register blocks */ #define XR21V141X_UART_REG_BLOCK 0 #define XR21V141X_UM_REG_BLOCK 4 #define XR21V141X_UART_CUSTOM_BLOCK 0x66 /* XR21V141X UART registers */ #define XR21V141X_CLOCK_DIVISOR_0 0x04 #define XR21V141X_CLOCK_DIVISOR_1 0x05 #define XR21V141X_CLOCK_DIVISOR_2 0x06 #define XR21V141X_TX_CLOCK_MASK_0 0x07 #define XR21V141X_TX_CLOCK_MASK_1 0x08 #define XR21V141X_RX_CLOCK_MASK_0 0x09 #define XR21V141X_RX_CLOCK_MASK_1 0x0a #define XR21V141X_REG_FORMAT 0x0b /* XR21V141X UART Manager registers */ #define XR21V141X_UM_FIFO_ENABLE_REG 0x10 #define XR21V141X_UM_ENABLE_TX_FIFO 0x01 #define XR21V141X_UM_ENABLE_RX_FIFO 0x02 #define XR21V141X_UM_RX_FIFO_RESET 0x18 #define XR21V141X_UM_TX_FIFO_RESET 0x1c #define XR_UART_ENABLE_TX 0x1 #define XR_UART_ENABLE_RX 0x2 #define XR_GPIO_RI BIT(0) #define XR_GPIO_CD BIT(1) #define XR_GPIO_DSR BIT(2) #define XR_GPIO_DTR BIT(3) #define XR_GPIO_CTS BIT(4) #define XR_GPIO_RTS BIT(5) #define XR_GPIO_CLK BIT(6) #define XR_GPIO_XEN BIT(7) #define XR_GPIO_TXT BIT(8) #define XR_GPIO_RXT BIT(9) #define XR_UART_DATA_MASK GENMASK(3, 0) #define XR_UART_DATA_7 0x7 #define XR_UART_DATA_8 0x8 #define XR_UART_PARITY_MASK GENMASK(6, 4) #define XR_UART_PARITY_SHIFT 4 #define XR_UART_PARITY_NONE (0x0 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_ODD (0x1 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_EVEN (0x2 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_MARK (0x3 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_SPACE (0x4 << XR_UART_PARITY_SHIFT) #define XR_UART_STOP_MASK BIT(7) #define XR_UART_STOP_SHIFT 7 #define XR_UART_STOP_1 (0x0 << XR_UART_STOP_SHIFT) #define XR_UART_STOP_2 (0x1 << XR_UART_STOP_SHIFT) #define XR_UART_FLOW_MODE_NONE 0x0 #define XR_UART_FLOW_MODE_HW 0x1 #define XR_UART_FLOW_MODE_SW 0x2 #define XR_GPIO_MODE_SEL_MASK GENMASK(2, 0) #define XR_GPIO_MODE_SEL_RTS_CTS 0x1 #define XR_GPIO_MODE_SEL_DTR_DSR
0x2 #define XR_GPIO_MODE_SEL_RS485 0x3 #define XR_GPIO_MODE_SEL_RS485_ADDR 0x4 #define XR_GPIO_MODE_RS485_TX_H 0x8 #define XR_GPIO_MODE_TX_TOGGLE 0x100 #define XR_GPIO_MODE_RX_TOGGLE 0x200 #define XR_FIFO_RESET 0x1 #define XR_CUSTOM_DRIVER_ACTIVE 0x1 static int xr21v141x_uart_enable(struct usb_serial_port *port); static int xr21v141x_uart_disable(struct usb_serial_port *port); static int xr21v141x_fifo_reset(struct usb_serial_port *port); static void xr21v141x_set_line_settings(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); struct xr_type { int reg_width; u8 reg_recipient; u8 set_reg; u8 get_reg; u16 uart_enable; u16 flow_control; u16 xon_char; u16 xoff_char; u16 tx_break; u16 gpio_mode; u16 gpio_direction; u16 gpio_set; u16 gpio_clear; u16 gpio_status; u16 tx_fifo_reset; u16 rx_fifo_reset; u16 custom_driver; bool have_5_6_bit_mode; bool have_xmit_toggle; int (*enable)(struct usb_serial_port *port); int (*disable)(struct usb_serial_port *port); int (*fifo_reset)(struct usb_serial_port *port); void (*set_line_settings)(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); }; enum xr_type_id { XR21V141X, XR21B142X, XR21B1411, XR2280X, XR_TYPE_COUNT, }; static const struct xr_type xr_types[] = { [XR21V141X] = { .reg_width = 8, .reg_recipient = USB_RECIP_DEVICE, .set_reg = 0x00, .get_reg = 0x01, .uart_enable = 0x03, .flow_control = 0x0c, .xon_char = 0x10, .xoff_char = 0x11, .tx_break = 0x14, .gpio_mode = 0x1a, .gpio_direction = 0x1b, .gpio_set = 0x1d, .gpio_clear = 0x1e, .gpio_status = 0x1f, .enable = xr21v141x_uart_enable, .disable = xr21v141x_uart_disable, .fifo_reset = xr21v141x_fifo_reset, .set_line_settings = xr21v141x_set_line_settings, }, [XR21B142X] = { .reg_width = 16, .reg_recipient = USB_RECIP_INTERFACE, .set_reg = 0x00, .get_reg = 0x00, .uart_enable = 0x00, .flow_control = 0x06, .xon_char = 0x07, .xoff_char = 0x08, .tx_break = 0x0a, .gpio_mode = 0x0c, .gpio_direction = 0x0d, .gpio_set = 0x0e, .gpio_clear = 0x0f, .gpio_status = 0x10, .tx_fifo_reset = 0x40, .rx_fifo_reset = 0x43, .custom_driver = 0x60, .have_5_6_bit_mode = true, .have_xmit_toggle = true, }, [XR21B1411] = { .reg_width = 12, .reg_recipient = USB_RECIP_DEVICE, .set_reg = 0x00, .get_reg = 0x01, .uart_enable = 0xc00, .flow_control = 0xc06, .xon_char = 0xc07, .xoff_char = 0xc08, .tx_break = 0xc0a, .gpio_mode = 0xc0c, .gpio_direction = 0xc0d, .gpio_set = 0xc0e, .gpio_clear = 0xc0f, .gpio_status = 0xc10, .tx_fifo_reset = 0xc80, .rx_fifo_reset = 0xcc0, .custom_driver = 0x20d, }, [XR2280X] = { .reg_width = 16, .reg_recipient = USB_RECIP_DEVICE, .set_reg = 0x05, .get_reg = 0x05, .uart_enable = 0x40, .flow_control = 0x46, .xon_char = 0x47, .xoff_char = 0x48, .tx_break = 0x4a, .gpio_mode = 0x4c, .gpio_direction = 0x4d, .gpio_set = 0x4e, .gpio_clear = 0x4f, .gpio_status = 0x50, .tx_fifo_reset = 0x60, .rx_fifo_reset = 0x63, .custom_driver = 0x81, }, }; struct xr_data { const struct xr_type *type; u8 channel; /* zero-based index or interface number */ struct serial_rs485 rs485; }; static int xr_set_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 val) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; struct usb_serial *serial = port->serial; int ret; ret = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), type->set_reg, USB_DIR_OUT | USB_TYPE_VENDOR | type->reg_recipient, val, (channel << 8) | reg, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_err(&port->dev, "Failed to set reg 
0x%02x: %d\n", reg, ret); return ret; } return 0; } static int xr_get_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 *val) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; struct usb_serial *serial = port->serial; u8 *dmabuf; int ret, len; if (type->reg_width == 8) len = 1; else len = 2; dmabuf = kmalloc(len, GFP_KERNEL); if (!dmabuf) return -ENOMEM; ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), type->get_reg, USB_DIR_IN | USB_TYPE_VENDOR | type->reg_recipient, 0, (channel << 8) | reg, dmabuf, len, USB_CTRL_GET_TIMEOUT); if (ret == len) { if (len == 2) *val = le16_to_cpup((__le16 *)dmabuf); else *val = *dmabuf; ret = 0; } else { dev_err(&port->dev, "Failed to get reg 0x%02x: %d\n", reg, ret); if (ret >= 0) ret = -EIO; } kfree(dmabuf); return ret; } static int xr_set_reg_uart(struct usb_serial_port *port, u16 reg, u16 val) { struct xr_data *data = usb_get_serial_port_data(port); return xr_set_reg(port, data->channel, reg, val); } static int xr_get_reg_uart(struct usb_serial_port *port, u16 reg, u16 *val) { struct xr_data *data = usb_get_serial_port_data(port); return xr_get_reg(port, data->channel, reg, val); } static int xr_set_reg_um(struct usb_serial_port *port, u8 reg_base, u8 val) { struct xr_data *data = usb_get_serial_port_data(port); u8 reg; reg = reg_base + data->channel; return xr_set_reg(port, XR21V141X_UM_REG_BLOCK, reg, val); } static int __xr_uart_enable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); return xr_set_reg_uart(port, data->type->uart_enable, XR_UART_ENABLE_TX | XR_UART_ENABLE_RX); } static int __xr_uart_disable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); return xr_set_reg_uart(port, data->type->uart_enable, 0); } /* * According to datasheet, below is the recommended sequence for enabling UART * module in XR21V141X: * * Enable Tx FIFO * Enable Tx and Rx * Enable Rx FIFO */ static int xr21v141x_uart_enable(struct usb_serial_port *port) { int ret; ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, XR21V141X_UM_ENABLE_TX_FIFO); if (ret) return ret; ret = __xr_uart_enable(port); if (ret) return ret; ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, XR21V141X_UM_ENABLE_TX_FIFO | XR21V141X_UM_ENABLE_RX_FIFO); if (ret) __xr_uart_disable(port); return ret; } static int xr21v141x_uart_disable(struct usb_serial_port *port) { int ret; ret = __xr_uart_disable(port); if (ret) return ret; ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, 0); return ret; } static int xr_uart_enable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); if (data->type->enable) return data->type->enable(port); return __xr_uart_enable(port); } static int xr_uart_disable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); if (data->type->disable) return data->type->disable(port); return __xr_uart_disable(port); } static int xr21v141x_fifo_reset(struct usb_serial_port *port) { int ret; ret = xr_set_reg_um(port, XR21V141X_UM_TX_FIFO_RESET, XR_FIFO_RESET); if (ret) return ret; ret = xr_set_reg_um(port, XR21V141X_UM_RX_FIFO_RESET, XR_FIFO_RESET); if (ret) return ret; return 0; } static int xr_fifo_reset(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); int ret; if (data->type->fifo_reset) return data->type->fifo_reset(port); ret = xr_set_reg_uart(port, data->type->tx_fifo_reset, XR_FIFO_RESET); if (ret) return ret; ret = 
xr_set_reg_uart(port, data->type->rx_fifo_reset, XR_FIFO_RESET); if (ret) return ret; return 0; } static int xr_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); u16 status; int ret; ret = xr_get_reg_uart(port, data->type->gpio_status, &status); if (ret) return ret; /* * Modem control pins are active low, so reading '0' means it is active * and '1' means not active. */ ret = ((status & XR_GPIO_DTR) ? 0 : TIOCM_DTR) | ((status & XR_GPIO_RTS) ? 0 : TIOCM_RTS) | ((status & XR_GPIO_CTS) ? 0 : TIOCM_CTS) | ((status & XR_GPIO_DSR) ? 0 : TIOCM_DSR) | ((status & XR_GPIO_RI) ? 0 : TIOCM_RI) | ((status & XR_GPIO_CD) ? 0 : TIOCM_CD); return ret; } static int xr_tiocmset_port(struct usb_serial_port *port, unsigned int set, unsigned int clear) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; u16 gpio_set = 0; u16 gpio_clr = 0; int ret = 0; /* Modem control pins are active low, so set & clr are swapped */ if (set & TIOCM_RTS) gpio_clr |= XR_GPIO_RTS; if (set & TIOCM_DTR) gpio_clr |= XR_GPIO_DTR; if (clear & TIOCM_RTS) gpio_set |= XR_GPIO_RTS; if (clear & TIOCM_DTR) gpio_set |= XR_GPIO_DTR; /* Writing '0' to gpio_{set/clr} bits has no effect, so no need to do */ if (gpio_clr) ret = xr_set_reg_uart(port, type->gpio_clear, gpio_clr); if (gpio_set) ret = xr_set_reg_uart(port, type->gpio_set, gpio_set); return ret; } static int xr_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return xr_tiocmset_port(port, set, clear); } static void xr_dtr_rts(struct usb_serial_port *port, int on) { if (on) xr_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0); else xr_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS); } static int xr_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; u16 state; if (break_state == 0) state = 0; else state = GENMASK(type->reg_width - 1, 0); dev_dbg(&port->dev, "Turning break %s\n", state == 0 ? 
"off" : "on"); return xr_set_reg_uart(port, type->tx_break, state); } /* Tx and Rx clock mask values obtained from section 3.3.4 of datasheet */ static const struct xr_txrx_clk_mask xr21v141x_txrx_clk_masks[] = { { 0x000, 0x000, 0x000 }, { 0x000, 0x000, 0x000 }, { 0x100, 0x000, 0x100 }, { 0x020, 0x400, 0x020 }, { 0x010, 0x100, 0x010 }, { 0x208, 0x040, 0x208 }, { 0x104, 0x820, 0x108 }, { 0x844, 0x210, 0x884 }, { 0x444, 0x110, 0x444 }, { 0x122, 0x888, 0x224 }, { 0x912, 0x448, 0x924 }, { 0x492, 0x248, 0x492 }, { 0x252, 0x928, 0x292 }, { 0x94a, 0x4a4, 0xa52 }, { 0x52a, 0xaa4, 0x54a }, { 0xaaa, 0x954, 0x4aa }, { 0xaaa, 0x554, 0xaaa }, { 0x555, 0xad4, 0x5aa }, { 0xb55, 0xab4, 0x55a }, { 0x6b5, 0x5ac, 0xb56 }, { 0x5b5, 0xd6c, 0x6d6 }, { 0xb6d, 0xb6a, 0xdb6 }, { 0x76d, 0x6da, 0xbb6 }, { 0xedd, 0xdda, 0x76e }, { 0xddd, 0xbba, 0xeee }, { 0x7bb, 0xf7a, 0xdde }, { 0xf7b, 0xef6, 0x7de }, { 0xdf7, 0xbf6, 0xf7e }, { 0x7f7, 0xfee, 0xefe }, { 0xfdf, 0xfbe, 0x7fe }, { 0xf7f, 0xefe, 0xffe }, { 0xfff, 0xffe, 0xffd }, }; static int xr21v141x_set_baudrate(struct tty_struct *tty, struct usb_serial_port *port) { u32 divisor, baud, idx; u16 tx_mask, rx_mask; int ret; baud = tty->termios.c_ospeed; if (!baud) return 0; baud = clamp(baud, XR21V141X_MIN_SPEED, XR21V141X_MAX_SPEED); divisor = XR_INT_OSC_HZ / baud; idx = ((32 * XR_INT_OSC_HZ) / baud) & 0x1f; tx_mask = xr21v141x_txrx_clk_masks[idx].tx; if (divisor & 0x01) rx_mask = xr21v141x_txrx_clk_masks[idx].rx1; else rx_mask = xr21v141x_txrx_clk_masks[idx].rx0; dev_dbg(&port->dev, "Setting baud rate: %u\n", baud); /* * XR21V141X uses fractional baud rate generator with 48MHz internal * oscillator and 19-bit programmable divisor. So theoretically it can * generate most commonly used baud rates with high accuracy. */ ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_0, divisor & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_1, (divisor >> 8) & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_2, (divisor >> 16) & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_0, tx_mask & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_1, (tx_mask >> 8) & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_0, rx_mask & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_1, (rx_mask >> 8) & 0xff); if (ret) return ret; tty_encode_baud_rate(tty, baud, baud); return 0; } static void xr_set_flow_mode(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; u16 flow, gpio_mode; bool rs485_enabled; int ret; ret = xr_get_reg_uart(port, type->gpio_mode, &gpio_mode); if (ret) return; /* * According to the datasheets, the UART needs to be disabled while * writing to the FLOW_CONTROL register (XR21V141X), or any register * but GPIO_SET, GPIO_CLEAR, TX_BREAK and ERROR_STATUS (XR21B142X). */ xr_uart_disable(port); /* Set GPIO mode for controlling the pins manually by default. 
*/ gpio_mode &= ~XR_GPIO_MODE_SEL_MASK; rs485_enabled = !!(data->rs485.flags & SER_RS485_ENABLED); if (rs485_enabled) { dev_dbg(&port->dev, "Enabling RS-485\n"); gpio_mode |= XR_GPIO_MODE_SEL_RS485; if (data->rs485.flags & SER_RS485_RTS_ON_SEND) gpio_mode &= ~XR_GPIO_MODE_RS485_TX_H; else gpio_mode |= XR_GPIO_MODE_RS485_TX_H; } if (C_CRTSCTS(tty) && C_BAUD(tty) != B0 && !rs485_enabled) { dev_dbg(&port->dev, "Enabling hardware flow ctrl\n"); gpio_mode |= XR_GPIO_MODE_SEL_RTS_CTS; flow = XR_UART_FLOW_MODE_HW; } else if (I_IXON(tty)) { u8 start_char = START_CHAR(tty); u8 stop_char = STOP_CHAR(tty); dev_dbg(&port->dev, "Enabling sw flow ctrl\n"); flow = XR_UART_FLOW_MODE_SW; xr_set_reg_uart(port, type->xon_char, start_char); xr_set_reg_uart(port, type->xoff_char, stop_char); } else { dev_dbg(&port->dev, "Disabling flow ctrl\n"); flow = XR_UART_FLOW_MODE_NONE; } xr_set_reg_uart(port, type->flow_control, flow); xr_set_reg_uart(port, type->gpio_mode, gpio_mode); xr_uart_enable(port); if (C_BAUD(tty) == B0) xr_dtr_rts(port, 0); else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) xr_dtr_rts(port, 1); } static void xr21v141x_set_line_settings(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct ktermios *termios = &tty->termios; u8 bits = 0; int ret; if (!old_termios || (tty->termios.c_ospeed != old_termios->c_ospeed)) xr21v141x_set_baudrate(tty, port); switch (C_CSIZE(tty)) { case CS5: case CS6: /* CS5 and CS6 are not supported, so just restore old setting */ termios->c_cflag &= ~CSIZE; if (old_termios) termios->c_cflag |= old_termios->c_cflag & CSIZE; else termios->c_cflag |= CS8; if (C_CSIZE(tty) == CS7) bits |= XR_UART_DATA_7; else bits |= XR_UART_DATA_8; break; case CS7: bits |= XR_UART_DATA_7; break; case CS8: default: bits |= XR_UART_DATA_8; break; } if (C_PARENB(tty)) { if (C_CMSPAR(tty)) { if (C_PARODD(tty)) bits |= XR_UART_PARITY_MARK; else bits |= XR_UART_PARITY_SPACE; } else { if (C_PARODD(tty)) bits |= XR_UART_PARITY_ODD; else bits |= XR_UART_PARITY_EVEN; } } if (C_CSTOPB(tty)) bits |= XR_UART_STOP_2; else bits |= XR_UART_STOP_1; ret = xr_set_reg_uart(port, XR21V141X_REG_FORMAT, bits); if (ret) return; } static void xr_cdc_set_line_coding(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct xr_data *data = usb_get_serial_port_data(port); struct usb_host_interface *alt = port->serial->interface->cur_altsetting; struct usb_device *udev = port->serial->dev; struct usb_cdc_line_coding *lc; int ret; lc = kzalloc(sizeof(*lc), GFP_KERNEL); if (!lc) return; if (tty->termios.c_ospeed) lc->dwDTERate = cpu_to_le32(tty->termios.c_ospeed); else lc->dwDTERate = cpu_to_le32(9600); if (C_CSTOPB(tty)) lc->bCharFormat = USB_CDC_2_STOP_BITS; else lc->bCharFormat = USB_CDC_1_STOP_BITS; if (C_PARENB(tty)) { if (C_CMSPAR(tty)) { if (C_PARODD(tty)) lc->bParityType = USB_CDC_MARK_PARITY; else lc->bParityType = USB_CDC_SPACE_PARITY; } else { if (C_PARODD(tty)) lc->bParityType = USB_CDC_ODD_PARITY; else lc->bParityType = USB_CDC_EVEN_PARITY; } } else { lc->bParityType = USB_CDC_NO_PARITY; } if (!data->type->have_5_6_bit_mode && (C_CSIZE(tty) == CS5 || C_CSIZE(tty) == CS6)) { tty->termios.c_cflag &= ~CSIZE; if (old_termios) tty->termios.c_cflag |= old_termios->c_cflag & CSIZE; else tty->termios.c_cflag |= CS8; } switch (C_CSIZE(tty)) { case CS5: lc->bDataBits = 5; break; case CS6: lc->bDataBits = 6; break; case CS7: lc->bDataBits = 7; break; case CS8: default: lc->bDataBits = 8; break; } ret = 
usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_CDC_REQ_SET_LINE_CODING, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, alt->desc.bInterfaceNumber, lc, sizeof(*lc), USB_CTRL_SET_TIMEOUT); if (ret < 0) dev_err(&port->dev, "Failed to set line coding: %d\n", ret); kfree(lc); } static void xr_sanitize_serial_rs485(struct serial_rs485 *rs485) { if (!(rs485->flags & SER_RS485_ENABLED)) { memset(rs485, 0, sizeof(*rs485)); return; } /* RTS always toggles after TX */ if (rs485->flags & SER_RS485_RTS_ON_SEND) rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; else rs485->flags |= SER_RS485_RTS_AFTER_SEND; /* Only the flags are implemented at the moment */ rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND; rs485->delay_rts_before_send = 0; rs485->delay_rts_after_send = 0; memset(rs485->padding, 0, sizeof(rs485->padding)); } static int xr_get_rs485_config(struct tty_struct *tty, struct serial_rs485 __user *argp) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); down_read(&tty->termios_rwsem); if (copy_to_user(argp, &data->rs485, sizeof(data->rs485))) { up_read(&tty->termios_rwsem); return -EFAULT; } up_read(&tty->termios_rwsem); return 0; } static int xr_set_rs485_config(struct tty_struct *tty, struct serial_rs485 __user *argp) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); struct serial_rs485 rs485; if (copy_from_user(&rs485, argp, sizeof(rs485))) return -EFAULT; xr_sanitize_serial_rs485(&rs485); down_write(&tty->termios_rwsem); data->rs485 = rs485; xr_set_flow_mode(tty, port, NULL); up_write(&tty->termios_rwsem); if (copy_to_user(argp, &rs485, sizeof(rs485))) return -EFAULT; return 0; } static int xr_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; switch (cmd) { case TIOCGRS485: return xr_get_rs485_config(tty, argp); case TIOCSRS485: return xr_set_rs485_config(tty, argp); } return -ENOIOCTLCMD; } static void xr_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct xr_data *data = usb_get_serial_port_data(port); /* * XR21V141X does not have a CUSTOM_DRIVER flag and always enters CDC * mode upon receiving CDC requests. 
*/ if (data->type->set_line_settings) data->type->set_line_settings(tty, port, old_termios); else xr_cdc_set_line_coding(tty, port, old_termios); xr_set_flow_mode(tty, port, old_termios); } static int xr_open(struct tty_struct *tty, struct usb_serial_port *port) { int ret; ret = xr_fifo_reset(port); if (ret) return ret; ret = xr_uart_enable(port); if (ret) { dev_err(&port->dev, "Failed to enable UART\n"); return ret; } /* Setup termios */ if (tty) xr_set_termios(tty, port, NULL); ret = usb_serial_generic_open(tty, port); if (ret) { xr_uart_disable(port); return ret; } return 0; } static void xr_close(struct usb_serial_port *port) { usb_serial_generic_close(port); xr_uart_disable(port); } static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_interface *control = serial->interface; struct usb_host_interface *alt = control->cur_altsetting; struct usb_cdc_parsed_header hdrs; struct usb_cdc_union_desc *desc; struct usb_interface *data; int ret; ret = cdc_parse_cdc_header(&hdrs, control, alt->extra, alt->extralen); if (ret < 0) return -ENODEV; desc = hdrs.usb_cdc_union_desc; if (!desc) return -ENODEV; data = usb_ifnum_to_if(serial->dev, desc->bSlaveInterface0); if (!data) return -ENODEV; ret = usb_serial_claim_interface(serial, data); if (ret) return ret; usb_set_serial_data(serial, (void *)id->driver_info); return 0; } static int xr_gpio_init(struct usb_serial_port *port, const struct xr_type *type) { u16 mask, mode; int ret; /* * Configure all pins as GPIO except for Receive and Transmit Toggle. */ mode = 0; if (type->have_xmit_toggle) mode |= XR_GPIO_MODE_RX_TOGGLE | XR_GPIO_MODE_TX_TOGGLE; ret = xr_set_reg_uart(port, type->gpio_mode, mode); if (ret) return ret; /* * Configure DTR and RTS as outputs and make sure they are deasserted * (active low), and configure RI, CD, DSR and CTS as inputs. 
*/ mask = XR_GPIO_DTR | XR_GPIO_RTS; ret = xr_set_reg_uart(port, type->gpio_direction, mask); if (ret) return ret; ret = xr_set_reg_uart(port, type->gpio_set, mask); if (ret) return ret; return 0; } static int xr_port_probe(struct usb_serial_port *port) { struct usb_interface_descriptor *desc; const struct xr_type *type; struct xr_data *data; enum xr_type_id type_id; int ret; type_id = (int)(unsigned long)usb_get_serial_data(port->serial); type = &xr_types[type_id]; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->type = type; desc = &port->serial->interface->cur_altsetting->desc; if (type_id == XR21V141X) data->channel = desc->bInterfaceNumber / 2; else data->channel = desc->bInterfaceNumber; usb_set_serial_port_data(port, data); if (type->custom_driver) { ret = xr_set_reg_uart(port, type->custom_driver, XR_CUSTOM_DRIVER_ACTIVE); if (ret) goto err_free; } ret = xr_gpio_init(port, type); if (ret) goto err_free; return 0; err_free: kfree(data); return ret; } static void xr_port_remove(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); kfree(data); } #define XR_DEVICE(vid, pid, type) \ USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_COMM), \ .driver_info = (type) static const struct usb_device_id id_table[] = { { XR_DEVICE(0x04e2, 0x1400, XR2280X) }, { XR_DEVICE(0x04e2, 0x1401, XR2280X) }, { XR_DEVICE(0x04e2, 0x1402, XR2280X) }, { XR_DEVICE(0x04e2, 0x1403, XR2280X) }, { XR_DEVICE(0x04e2, 0x1410, XR21V141X) }, { XR_DEVICE(0x04e2, 0x1411, XR21B1411) }, { XR_DEVICE(0x04e2, 0x1412, XR21V141X) }, { XR_DEVICE(0x04e2, 0x1414, XR21V141X) }, { XR_DEVICE(0x04e2, 0x1420, XR21B142X) }, { XR_DEVICE(0x04e2, 0x1422, XR21B142X) }, { XR_DEVICE(0x04e2, 0x1424, XR21B142X) }, { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver xr_device = { .driver = { .name = "xr_serial", }, .id_table = id_table, .num_ports = 1, .probe = xr_probe, .port_probe = xr_port_probe, .port_remove = xr_port_remove, .open = xr_open, .close = xr_close, .break_ctl = xr_break_ctl, .set_termios = xr_set_termios, .tiocmget = xr_tiocmget, .tiocmset = xr_tiocmset, .ioctl = xr_ioctl, .dtr_rts = xr_dtr_rts }; static struct usb_serial_driver * const serial_drivers[] = { &xr_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR("Manivannan Sadhasivam <mani@kernel.org>"); MODULE_DESCRIPTION("MaxLinear/Exar USB to Serial driver"); MODULE_LICENSE("GPL");
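Usage note (not part of the driver source above): since xr_ioctl() wires up TIOCGRS485/TIOCSRS485 and xr_sanitize_serial_rs485() only keeps the ENABLED/RTS_ON_SEND/RTS_AFTER_SEND flags, user space can switch one of these adapters into RS-485 mode with a plain ioctl. A minimal, hedged sketch follows; the device node /dev/ttyUSB0 is an assumption for illustration only.

/* Hypothetical user-space sketch: enable RS-485 with RTS asserted on send
 * on an xr_serial port.  /dev/ttyUSB0 is an assumed device node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;

	/* The driver sanitizes the flags, clears the RTS delays it does not
	 * implement, and copies the result back to user space. */
	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	close(fd);
	return 0;
}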
5 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 // SPDX-License-Identifier: GPL-2.0-or-later /* * Generic address resolution entity * * Authors: * net_random Alan Cox * net_ratelimit Andi Kleen * in{4,6}_pton YOSHIFUJI Hideaki, Copyright (C)2006 USAGI/WIDE Project * * Created by Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> */ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/mm.h> #include <linux/net.h> #include <linux/string.h> #include <linux/types.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/ratelimit.h> #include <linux/socket.h> #include <net/sock.h> #include <net/net_ratelimit.h> #include <net/ipv6.h> #include <asm/byteorder.h> #include <linux/uaccess.h> DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10); /* * All net warning printk()s should be guarded by this function. */ int net_ratelimit(void) { return __ratelimit(&net_ratelimit_state); } EXPORT_SYMBOL(net_ratelimit); /* * Convert an ASCII string to binary IP. * This is outside of net/ipv4/ because various code that uses IP addresses * is otherwise not dependent on the TCP/IP stack. */ __be32 in_aton(const char *str) { unsigned int l; unsigned int val; int i; l = 0; for (i = 0; i < 4; i++) { l <<= 8; if (*str != '\0') { val = 0; while (*str != '\0' && *str != '.' 
&& *str != '\n') { val *= 10; val += *str - '0'; str++; } l |= val; if (*str != '\0') str++; } } return htonl(l); } EXPORT_SYMBOL(in_aton); #define IN6PTON_XDIGIT 0x00010000 #define IN6PTON_DIGIT 0x00020000 #define IN6PTON_COLON_MASK 0x00700000 #define IN6PTON_COLON_1 0x00100000 /* single : requested */ #define IN6PTON_COLON_2 0x00200000 /* second : requested */ #define IN6PTON_COLON_1_2 0x00400000 /* :: requested */ #define IN6PTON_DOT 0x00800000 /* . */ #define IN6PTON_DELIM 0x10000000 #define IN6PTON_NULL 0x20000000 /* first/tail */ #define IN6PTON_UNKNOWN 0x40000000 static inline int xdigit2bin(char c, int delim) { int val; if (c == delim || c == '\0') return IN6PTON_DELIM; if (c == ':') return IN6PTON_COLON_MASK; if (c == '.') return IN6PTON_DOT; val = hex_to_bin(c); if (val >= 0) return val | IN6PTON_XDIGIT | (val < 10 ? IN6PTON_DIGIT : 0); if (delim == -1) return IN6PTON_DELIM; return IN6PTON_UNKNOWN; } /** * in4_pton - convert an IPv4 address from literal to binary representation * @src: the start of the IPv4 address string * @srclen: the length of the string, -1 means strlen(src) * @dst: the binary (u8[4] array) representation of the IPv4 address * @delim: the delimiter of the IPv4 address in @src, -1 means no delimiter * @end: A pointer to the end of the parsed string will be placed here * * Return one on success, return zero when any error occurs * and @end will point to the end of the parsed string. * */ int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end) { const char *s; u8 *d; u8 dbuf[4]; int ret = 0; int i; int w = 0; if (srclen < 0) srclen = strlen(src); s = src; d = dbuf; i = 0; while (1) { int c; c = xdigit2bin(srclen > 0 ? *s : '\0', delim); if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) { goto out; } if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) { if (w == 0) goto out; *d++ = w & 0xff; w = 0; i++; if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) { if (i != 4) goto out; break; } goto cont; } w = (w * 10) + c; if ((w & 0xffff) > 255) { goto out; } cont: if (i >= 4) goto out; s++; srclen--; } ret = 1; memcpy(dst, dbuf, sizeof(dbuf)); out: if (end) *end = s; return ret; } EXPORT_SYMBOL(in4_pton); /** * in6_pton - convert an IPv6 address from literal to binary representation * @src: the start of the IPv6 address string * @srclen: the length of the string, -1 means strlen(src) * @dst: the binary (u8[16] array) representation of the IPv6 address * @delim: the delimiter of the IPv6 address in @src, -1 means no delimiter * @end: A pointer to the end of the parsed string will be placed here * * Return one on success, return zero when any error occurs * and @end will point to the end of the parsed string. * */ int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end) { const char *s, *tok = NULL; u8 *d, *dc = NULL; u8 dbuf[16]; int ret = 0; int i; int state = IN6PTON_COLON_1_2 | IN6PTON_XDIGIT | IN6PTON_NULL; int w = 0; memset(dbuf, 0, sizeof(dbuf)); s = src; d = dbuf; if (srclen < 0) srclen = strlen(src); while (1) { int c; c = xdigit2bin(srclen > 0 ? 
*s : '\0', delim); if (!(c & state)) goto out; if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) { /* process one 16-bit word */ if (!(state & IN6PTON_NULL)) { *d++ = (w >> 8) & 0xff; *d++ = w & 0xff; } w = 0; if (c & IN6PTON_DELIM) { /* We've processed last word */ break; } /* * COLON_1 => XDIGIT * COLON_2 => XDIGIT|DELIM * COLON_1_2 => COLON_2 */ switch (state & IN6PTON_COLON_MASK) { case IN6PTON_COLON_2: dc = d; state = IN6PTON_XDIGIT | IN6PTON_DELIM; if (dc - dbuf >= sizeof(dbuf)) state |= IN6PTON_NULL; break; case IN6PTON_COLON_1|IN6PTON_COLON_1_2: state = IN6PTON_XDIGIT | IN6PTON_COLON_2; break; case IN6PTON_COLON_1: state = IN6PTON_XDIGIT; break; case IN6PTON_COLON_1_2: state = IN6PTON_COLON_2; break; default: state = 0; } tok = s + 1; goto cont; } if (c & IN6PTON_DOT) { ret = in4_pton(tok ? tok : s, srclen + (int)(s - tok), d, delim, &s); if (ret > 0) { d += 4; break; } goto out; } w = (w << 4) | (0xff & c); state = IN6PTON_COLON_1 | IN6PTON_DELIM; if (!(w & 0xf000)) { state |= IN6PTON_XDIGIT; } if (!dc && d + 2 < dbuf + sizeof(dbuf)) { state |= IN6PTON_COLON_1_2; state &= ~IN6PTON_DELIM; } if (d + 2 >= dbuf + sizeof(dbuf)) { state &= ~(IN6PTON_COLON_1|IN6PTON_COLON_1_2); } cont: if ((dc && d + 4 < dbuf + sizeof(dbuf)) || d + 4 == dbuf + sizeof(dbuf)) { state |= IN6PTON_DOT; } if (d >= dbuf + sizeof(dbuf)) { state &= ~(IN6PTON_XDIGIT|IN6PTON_COLON_MASK); } s++; srclen--; } i = 15; d--; if (dc) { while (d >= dc) dst[i--] = *d--; while (i >= dc - dbuf) dst[i--] = 0; while (i >= 0) dst[i--] = *d--; } else memcpy(dst, dbuf, sizeof(dbuf)); ret = 1; out: if (end) *end = s; return ret; } EXPORT_SYMBOL(in6_pton); static int inet4_pton(const char *src, u16 port_num, struct sockaddr_storage *addr) { struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; size_t srclen = strlen(src); if (srclen > INET_ADDRSTRLEN) return -EINVAL; if (in4_pton(src, srclen, (u8 *)&addr4->sin_addr.s_addr, '\n', NULL) == 0) return -EINVAL; addr4->sin_family = AF_INET; addr4->sin_port = htons(port_num); return 0; } static int inet6_pton(struct net *net, const char *src, u16 port_num, struct sockaddr_storage *addr) { struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; const char *scope_delim; size_t srclen = strlen(src); if (srclen > INET6_ADDRSTRLEN) return -EINVAL; if (in6_pton(src, srclen, (u8 *)&addr6->sin6_addr.s6_addr, '%', &scope_delim) == 0) return -EINVAL; if (ipv6_addr_type(&addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL && src + srclen != scope_delim && *scope_delim == '%') { struct net_device *dev; char scope_id[16]; size_t scope_len = min_t(size_t, sizeof(scope_id) - 1, src + srclen - scope_delim - 1); memcpy(scope_id, scope_delim + 1, scope_len); scope_id[scope_len] = '\0'; dev = dev_get_by_name(net, scope_id); if (dev) { addr6->sin6_scope_id = dev->ifindex; dev_put(dev); } else if (kstrtouint(scope_id, 0, &addr6->sin6_scope_id)) { return -EINVAL; } } addr6->sin6_family = AF_INET6; addr6->sin6_port = htons(port_num); return 0; } /** * inet_pton_with_scope - convert an IPv4/IPv6 and port to socket address * @net: net namespace (used for scope handling) * @af: address family, AF_INET, AF_INET6 or AF_UNSPEC for either * @src: the start of the address string * @port: the start of the port string (or NULL for none) * @addr: output socket address * * Return zero on success, return errno when any error occurs. 
*/ int inet_pton_with_scope(struct net *net, __kernel_sa_family_t af, const char *src, const char *port, struct sockaddr_storage *addr) { u16 port_num; int ret = -EINVAL; if (port) { if (kstrtou16(port, 0, &port_num)) return -EINVAL; } else { port_num = 0; } switch (af) { case AF_INET: ret = inet4_pton(src, port_num, addr); break; case AF_INET6: ret = inet6_pton(net, src, port_num, addr); break; case AF_UNSPEC: ret = inet4_pton(src, port_num, addr); if (ret) ret = inet6_pton(net, src, port_num, addr); break; default: pr_err("unexpected address family %d\n", af); } return ret; } EXPORT_SYMBOL(inet_pton_with_scope); bool inet_addr_is_any(struct sockaddr *addr) { if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr; const struct sockaddr_in6 in6_any = { .sin6_addr = IN6ADDR_ANY_INIT }; if (!memcmp(in6->sin6_addr.s6_addr, in6_any.sin6_addr.s6_addr, 16)) return true; } else if (addr->sa_family == AF_INET) { struct sockaddr_in *in = (struct sockaddr_in *)addr; if (in->sin_addr.s_addr == htonl(INADDR_ANY)) return true; } else { pr_warn("unexpected address family %u\n", addr->sa_family); } return false; } EXPORT_SYMBOL(inet_addr_is_any); void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr) { if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_replace4(sum, from, to); if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) skb->csum = ~csum_add(csum_sub(~(skb->csum), (__force __wsum)from), (__force __wsum)to); } else if (pseudohdr) *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum), (__force __wsum)from), (__force __wsum)to)); } EXPORT_SYMBOL(inet_proto_csum_replace4); /** * inet_proto_csum_replace16 - update layer 4 header checksum field * @sum: Layer 4 header checksum field * @skb: sk_buff for the packet * @from: old IPv6 address * @to: new IPv6 address * @pseudohdr: True if layer 4 header checksum includes pseudoheader * * Update layer 4 header as per the update in IPv6 src/dst address. * * There is no need to update skb->csum in this function, because update in two * fields a.) IPv6 src/dst address and b.) L4 header checksum cancels each other * for skb->csum calculation. Whereas inet_proto_csum_replace4 function needs to * update skb->csum, because update in 3 fields a.) IPv4 src/dst address, * b.) IPv4 Header checksum and c.) L4 header checksum results in same diff as * L4 Header checksum for skb->csum calculation. */ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, bool pseudohdr) { __be32 diff[] = { ~from[0], ~from[1], ~from[2], ~from[3], to[0], to[1], to[2], to[3], }; if (skb->ip_summed != CHECKSUM_PARTIAL) { *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum))); } else if (pseudohdr) *sum = ~csum_fold(csum_partial(diff, sizeof(diff), csum_unfold(*sum))); } EXPORT_SYMBOL(inet_proto_csum_replace16); void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, __wsum diff, bool pseudohdr) { if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_replace_by_diff(sum, diff); if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) skb->csum = ~csum_sub(diff, skb->csum); } else if (pseudohdr) { *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum))); } } EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
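Usage note (not part of the file above): inet_pton_with_scope() is the convenience entry point most in-kernel callers want, since it handles IPv4, IPv6 and the "%scope" suffix in one call and falls back between families when AF_UNSPEC is passed. A hedged sketch of a hypothetical kernel caller; the function name and error message are illustrative only.

/* Hypothetical in-kernel sketch: parse an address/port pair supplied as
 * strings (e.g. from a configfs attribute) into a sockaddr_storage. */
#include <linux/inet.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <net/net_namespace.h>

static int example_parse_endpoint(const char *addr, const char *port,
				  struct sockaddr_storage *ss)
{
	int ret;

	/* AF_UNSPEC lets the helper try IPv4 first and fall back to IPv6,
	 * exactly as the switch statement in inet_pton_with_scope() does. */
	ret = inet_pton_with_scope(&init_net, AF_UNSPEC, addr, port, ss);
	if (ret)
		pr_err("invalid endpoint %s:%s\n", addr, port);

	return ret;
}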
3 3 1 3 3 3 2 1 3 1 1 1 1 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 // SPDX-License-Identifier: GPL-2.0-or-later /* * Syntek DV4000 (STK014) subdriver * * Copyright (C) 2008 Jean-Francois Moine (http://moinejf.free.fr) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "stk014" #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("Syntek DV4000 (STK014) USB Camera Driver"); MODULE_LICENSE("GPL"); #define QUALITY 50 /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ u8 jpeg_hdr[JPEG_HDR_SZ]; }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; /* -- read a register -- */ static u8 reg_r(struct gspca_dev *gspca_dev, __u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; return 0; } return gspca_dev->usb_buf[0]; } /* -- write a register -- */ static void reg_w(struct gspca_dev *gspca_dev, __u16 index, __u16 value) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); if (ret < 0) { pr_err("reg_w err %d\n", ret); gspca_dev->usb_err = ret; } } /* -- get a bulk value (4 bytes) -- */ static void rcv_val(struct gspca_dev *gspca_dev, int ads) { struct usb_device *dev = gspca_dev->dev; int alen, ret; reg_w(gspca_dev, 0x634, (ads >> 16) & 0xff); reg_w(gspca_dev, 0x635, (ads >> 8) & 0xff); reg_w(gspca_dev, 0x636, ads & 0xff); reg_w(gspca_dev, 0x637, 0); reg_w(gspca_dev, 0x638, 4); /* len & 0xff */ reg_w(gspca_dev, 0x639, 0); /* len >> 8 */ reg_w(gspca_dev, 0x63a, 0); reg_w(gspca_dev, 0x63b, 0); reg_w(gspca_dev, 0x630, 5); if (gspca_dev->usb_err < 0) return; ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, 0x05), gspca_dev->usb_buf, 4, /* length */ &alen, 500); /* timeout in milliseconds */ if (ret < 0) { pr_err("rcv_val err %d\n", ret); gspca_dev->usb_err = ret; } } /* -- send a bulk value -- */ static void snd_val(struct gspca_dev *gspca_dev, int ads, unsigned int val) { struct usb_device *dev = gspca_dev->dev; int alen, ret; __u8 seq = 0; if (ads == 0x003f08) { reg_r(gspca_dev, 0x0704); seq = reg_r(gspca_dev, 0x0705); reg_r(gspca_dev, 0x0650); reg_w(gspca_dev, 0x654, seq); } else { reg_w(gspca_dev, 0x654, (ads >> 16) & 0xff); } reg_w(gspca_dev, 0x655, (ads >> 8) & 0xff); reg_w(gspca_dev, 0x656, ads & 0xff); reg_w(gspca_dev, 0x657, 0); reg_w(gspca_dev, 0x658, 0x04); /* size */ reg_w(gspca_dev, 0x659, 0); reg_w(gspca_dev, 0x65a, 0); reg_w(gspca_dev, 0x65b, 0); reg_w(gspca_dev, 0x650, 5); if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = val >> 24; gspca_dev->usb_buf[1] = val >> 16; gspca_dev->usb_buf[2] = val >> 8; gspca_dev->usb_buf[3] = val; ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 6), gspca_dev->usb_buf, 4, &alen, 500); /* timeout in milliseconds */ if (ret < 0) { pr_err("snd_val err %d\n", ret); gspca_dev->usb_err = ret; } else { if (ads == 0x003f08) { seq += 4; seq &= 0x3f; reg_w(gspca_dev, 0x705, seq); } } } /* set a camera parameter */ static void set_par(struct gspca_dev *gspca_dev, int parval) { snd_val(gspca_dev, 0x003f08, parval); } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { int parval; parval = 0x06000000 /* whiteness */ + (val << 16); set_par(gspca_dev, parval); } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { int parval; parval = 0x07000000 /* contrast */ + (val << 16); set_par(gspca_dev, parval); } static void setcolors(struct 
gspca_dev *gspca_dev, s32 val) { int parval; parval = 0x08000000 /* saturation */ + (val << 16); set_par(gspca_dev, parval); } static void setlightfreq(struct gspca_dev *gspca_dev, s32 val) { set_par(gspca_dev, val == 1 ? 0x33640000 /* 50 Hz */ : 0x33780000); /* 60 Hz */ } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = vga_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u8 ret; /* check if the device responds */ usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); ret = reg_r(gspca_dev, 0x0740); if (gspca_dev->usb_err >= 0) { if (ret != 0xff) { pr_err("init reg: 0x%02x\n", ret); gspca_dev->usb_err = -EIO; } } return gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret, value; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); /* work on alternate 1 */ usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); set_par(gspca_dev, 0x10000000); set_par(gspca_dev, 0x00000000); set_par(gspca_dev, 0x8002e001); set_par(gspca_dev, 0x14000000); if (gspca_dev->pixfmt.width > 320) value = 0x8002e001; /* 640x480 */ else value = 0x4001f000; /* 320x240 */ set_par(gspca_dev, value); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->alt); if (ret < 0) { pr_err("set intf %d %d failed\n", gspca_dev->iface, gspca_dev->alt); gspca_dev->usb_err = ret; goto out; } reg_r(gspca_dev, 0x0630); rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */ reg_r(gspca_dev, 0x0650); snd_val(gspca_dev, 0x000020, 0xffffffff); reg_w(gspca_dev, 0x0620, 0); reg_w(gspca_dev, 0x0630, 0); reg_w(gspca_dev, 0x0640, 0); reg_w(gspca_dev, 0x0650, 0); reg_w(gspca_dev, 0x0660, 0); set_par(gspca_dev, 0x09800000); /* Red ? */ set_par(gspca_dev, 0x0a800000); /* Green ? */ set_par(gspca_dev, 0x0b800000); /* Blue ? */ set_par(gspca_dev, 0x0d030000); /* Gamma ? */ /* start the video flow */ set_par(gspca_dev, 0x01000000); set_par(gspca_dev, 0x01000000); if (gspca_dev->usb_err >= 0) gspca_dbg(gspca_dev, D_STREAM, "camera started alt: 0x%02x\n", gspca_dev->alt); out: return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; set_par(gspca_dev, 0x02000000); set_par(gspca_dev, 0x02000000); usb_set_interface(dev, gspca_dev->iface, 1); reg_r(gspca_dev, 0x0630); rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */ reg_r(gspca_dev, 0x0650); snd_val(gspca_dev, 0x000020, 0xffffffff); reg_w(gspca_dev, 0x0620, 0); reg_w(gspca_dev, 0x0630, 0); reg_w(gspca_dev, 0x0640, 0); reg_w(gspca_dev, 0x0650, 0); reg_w(gspca_dev, 0x0660, 0); gspca_dbg(gspca_dev, D_STREAM, "camera stopped\n"); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; static unsigned char ffd9[] = {0xff, 0xd9}; /* a frame starts with: * - 0xff 0xfe * - 0x08 0x00 - length (little endian ?!) * - 4 bytes = size of whole frame (BE - including header) * - 0x00 0x0c * - 0xff 0xd8 * - .. 
JPEG image with escape sequences (ff 00) * (without ending - ff d9) */ if (data[0] == 0xff && data[1] == 0xfe) { gspca_frame_add(gspca_dev, LAST_PACKET, ffd9, 2); /* put the JPEG 411 header */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* beginning of the frame */ #define STKHDRSZ 12 data += STKHDRSZ; len -= STKHDRSZ; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_POWER_LINE_FREQUENCY: setlightfreq(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 127); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 127); v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1, V4L2_CID_POWER_LINE_FREQUENCY_50HZ); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x05e1, 0x0893)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
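Usage note (not part of the subdriver above): the stk014 subdriver registers standard V4L2 user controls (brightness, contrast, saturation, power-line frequency), so an ordinary VIDIOC_S_CTRL from user space ends up in sd_s_ctrl(). A minimal sketch, assuming the camera shows up as /dev/video0 (an assumption for illustration).

/* Hypothetical user-space sketch: set the brightness control on the
 * camera; /dev/video0 is an assumed device node. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_BRIGHTNESS,
		.value = 200,	/* driver range is 0..255, default 127 */
	};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}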
14 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 /* * Copyright (C) 2016 Red Hat * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or 
substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rob Clark <robdclark@gmail.com> */ #ifndef DRM_PRINT_H_ #define DRM_PRINT_H_ #include <linux/compiler.h> #include <linux/printk.h> #include <linux/device.h> #include <linux/dynamic_debug.h> #include <drm/drm.h> struct debugfs_regset32; struct drm_device; struct seq_file; /* Do *not* use outside of drm_print.[ch]! */ extern unsigned long __drm_debug; /** * DOC: print * * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same * debug code to be used for both debugfs and printk logging. * * For example:: * * void log_some_info(struct drm_printer *p) * { * drm_printf(p, "foo=%d\n", foo); * drm_printf(p, "bar=%d\n", bar); * } * * #ifdef CONFIG_DEBUG_FS * void debugfs_show(struct seq_file *f) * { * struct drm_printer p = drm_seq_file_printer(f); * log_some_info(&p); * } * #endif * * void some_other_function(...) * { * struct drm_printer p = drm_info_printer(drm->dev); * log_some_info(&p); * } */ /** * enum drm_debug_category - The DRM debug categories * * Each of the DRM debug logging macros use a specific category, and the logging * is filtered by the drm.debug module parameter. This enum specifies the values * for the interface. * * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except * DRM_DEBUG() logs to DRM_UT_CORE. * * Enabling verbose debug messages is done through the drm.debug parameter, each * category being enabled by a bit: * * - drm.debug=0x1 will enable CORE messages * - drm.debug=0x2 will enable DRIVER messages * - drm.debug=0x3 will enable CORE and DRIVER messages * - ... * - drm.debug=0x1ff will enable all messages * * An interesting feature is that it's possible to enable verbose logging at * run-time by echoing the debug value in its sysfs node:: * * # echo 0xf > /sys/module/drm/parameters/debug * */ enum drm_debug_category { /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */ /** * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, * drm_memory.c, ... */ DRM_UT_CORE, /** * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915, * radeon, ... macro. */ DRM_UT_DRIVER, /** * @DRM_UT_KMS: Used in the modesetting code. */ DRM_UT_KMS, /** * @DRM_UT_PRIME: Used in the prime code. */ DRM_UT_PRIME, /** * @DRM_UT_ATOMIC: Used in the atomic code. */ DRM_UT_ATOMIC, /** * @DRM_UT_VBL: Used for verbose debug message in the vblank code. */ DRM_UT_VBL, /** * @DRM_UT_STATE: Used for verbose atomic state debugging. */ DRM_UT_STATE, /** * @DRM_UT_LEASE: Used in the lease code. */ DRM_UT_LEASE, /** * @DRM_UT_DP: Used in the DP code. */ DRM_UT_DP, /** * @DRM_UT_DRMRES: Used in the drm managed resources code. 
*/ DRM_UT_DRMRES }; static inline bool drm_debug_enabled_raw(enum drm_debug_category category) { return unlikely(__drm_debug & BIT(category)); } #define drm_debug_enabled_instrumented(category) \ ({ \ pr_debug("todo: is this frequent enough to optimize ?\n"); \ drm_debug_enabled_raw(category); \ }) #if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) /* * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets * a descriptor, and only enabled callsites are reachable. They use * the private macro to avoid re-testing the enable-bit. */ #define __drm_debug_enabled(category) true #define drm_debug_enabled(category) drm_debug_enabled_instrumented(category) #else #define __drm_debug_enabled(category) drm_debug_enabled_raw(category) #define drm_debug_enabled(category) drm_debug_enabled_raw(category) #endif /** * struct drm_printer - drm output "stream" * * Do not use struct members directly. Use drm_printer_seq_file(), * drm_printer_info(), etc to initialize. And drm_printf() for output. */ struct drm_printer { /* private: */ void (*printfn)(struct drm_printer *p, struct va_format *vaf); void (*puts)(struct drm_printer *p, const char *str); void *arg; const void *origin; const char *prefix; struct { unsigned int series; unsigned int counter; } line; enum drm_debug_category category; }; void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf); void __drm_puts_coredump(struct drm_printer *p, const char *str); void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); void __drm_puts_seq_file(struct drm_printer *p, const char *str); void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); void drm_puts(struct drm_printer *p, const char *str); void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); void drm_print_bits(struct drm_printer *p, unsigned long value, const char * const bits[], unsigned int nbits); void drm_print_hex_dump(struct drm_printer *p, const char *prefix, const u8 *buf, size_t len); __printf(2, 0) /** * drm_vprintf - print to a &drm_printer stream * @p: the &drm_printer * @fmt: format string * @va: the va_list */ static inline void drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) { struct va_format vaf = { .fmt = fmt, .va = va }; p->printfn(p, &vaf); } /** * drm_printf_indent - Print to a &drm_printer stream with indentation * @printer: DRM printer * @indent: Tab indentation level (max 5) * @fmt: Format string */ #define drm_printf_indent(printer, indent, fmt, ...) 
\ drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) /** * struct drm_print_iterator - local struct used with drm_printer_coredump * @data: Pointer to the devcoredump output buffer, can be NULL if using * drm_printer_coredump to determine size of devcoredump * @start: The offset within the buffer to start writing * @remain: The number of bytes to write for this iteration */ struct drm_print_iterator { void *data; ssize_t start; ssize_t remain; /* private: */ ssize_t offset; }; /** * drm_coredump_printer - construct a &drm_printer that can output to a buffer * from the read function for devcoredump * @iter: A pointer to a struct drm_print_iterator for the read instance * * This wrapper extends drm_printf() to work with a dev_coredumpm() callback * function. The passed in drm_print_iterator struct contains the buffer * pointer, size and offset as passed in from devcoredump. * * For example:: * * void coredump_read(char *buffer, loff_t offset, size_t count, * void *data, size_t datalen) * { * struct drm_print_iterator iter; * struct drm_printer p; * * iter.data = buffer; * iter.start = offset; * iter.remain = count; * * p = drm_coredump_printer(&iter); * * drm_printf(p, "foo=%d\n", foo); * } * * void makecoredump(...) * { * ... * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL, * coredump_read, ...) * } * * The above example has a time complexity of O(N^2), where N is the size of the * devcoredump. This is acceptable for small devcoredumps but scales poorly for * larger ones. * * Another use case for drm_coredump_printer is to capture the devcoredump into * a saved buffer before the dev_coredump() callback. This involves two passes: * one to determine the size of the devcoredump and another to print it to a * buffer. Then, in dev_coredump(), copy from the saved buffer into the * devcoredump read buffer. * * For example:: * * char *devcoredump_saved_buffer; * * ssize_t __coredump_print(char *buffer, ssize_t count, ...) * { * struct drm_print_iterator iter; * struct drm_printer p; * * iter.data = buffer; * iter.start = 0; * iter.remain = count; * * p = drm_coredump_printer(&iter); * * drm_printf(p, "foo=%d\n", foo); * ... * return count - iter.remain; * } * * void coredump_print(...) * { * ssize_t count; * * count = __coredump_print(NULL, INT_MAX, ...); * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL); * __coredump_print(devcoredump_saved_buffer, count, ...); * } * * void coredump_read(char *buffer, loff_t offset, size_t count, * void *data, size_t datalen) * { * ... * memcpy(buffer, devcoredump_saved_buffer + offset, count); * ... * } * * The above example has a time complexity of O(N*2), where N is the size of the * devcoredump. This scales better than the previous example for larger * devcoredumps. 
* * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_coredump_printer(struct drm_print_iterator *iter) { struct drm_printer p = { .printfn = __drm_printfn_coredump, .puts = __drm_puts_coredump, .arg = iter, }; /* Set the internal offset of the iterator to zero */ iter->offset = 0; return p; } /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file * @f: the &struct seq_file to output to * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) { struct drm_printer p = { .printfn = __drm_printfn_seq_file, .puts = __drm_puts_seq_file, .arg = f, }; return p; } /** * drm_info_printer - construct a &drm_printer that outputs to dev_printk() * @dev: the &struct device pointer * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_info_printer(struct device *dev) { struct drm_printer p = { .printfn = __drm_printfn_info, .arg = dev, }; return p; } /** * drm_dbg_printer - construct a &drm_printer for drm device specific output * @drm: the &struct drm_device pointer, or NULL * @category: the debug category to use * @prefix: debug output prefix, or NULL for no prefix * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_dbg_printer(struct drm_device *drm, enum drm_debug_category category, const char *prefix) { struct drm_printer p = { .printfn = __drm_printfn_dbg, .arg = drm, .origin = (const void *)_THIS_IP_, /* it's fine as we will be inlined */ .prefix = prefix, .category = category, }; return p; } /** * drm_err_printer - construct a &drm_printer that outputs to drm_err() * @drm: the &struct drm_device pointer * @prefix: debug output prefix, or NULL for no prefix * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_err_printer(struct drm_device *drm, const char *prefix) { struct drm_printer p = { .printfn = __drm_printfn_err, .arg = drm, .prefix = prefix }; return p; } /** * drm_line_printer - construct a &drm_printer that prefixes outputs with line numbers * @p: the &struct drm_printer which actually generates the output * @prefix: optional output prefix, or NULL for no prefix * @series: optional unique series identifier, or 0 to omit identifier in the output * * This printer can be used to increase the robustness of the captured output * to make sure we didn't lost any intermediate lines of the output. Helpful * while capturing some crash data. 
* * Example 1:: * * void crash_dump(struct drm_device *drm) * { * static unsigned int id; * struct drm_printer p = drm_err_printer(drm, "crash"); * struct drm_printer lp = drm_line_printer(&p, "dump", ++id); * * drm_printf(&lp, "foo"); * drm_printf(&lp, "bar"); * } * * Above code will print into the dmesg something like:: * * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.1: foo * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.2: bar * * Example 2:: * * void line_dump(struct device *dev) * { * struct drm_printer p = drm_info_printer(dev); * struct drm_printer lp = drm_line_printer(&p, NULL, 0); * * drm_printf(&lp, "foo"); * drm_printf(&lp, "bar"); * } * * Above code will print:: * * [ ] 0000:00:00.0: [drm] 1: foo * [ ] 0000:00:00.0: [drm] 2: bar * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_line_printer(struct drm_printer *p, const char *prefix, unsigned int series) { struct drm_printer lp = { .printfn = __drm_printfn_line, .arg = p, .prefix = prefix, .line = { .series = series, }, }; return lp; } /* * struct device based logging * * Prefer drm_device based logging over device or printk based logging. */ __printf(3, 4) void drm_dev_printk(const struct device *dev, const char *level, const char *format, ...); struct _ddebug; __printf(4, 5) void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, enum drm_debug_category category, const char *format, ...); /** * DRM_DEV_ERROR() - Error output. * * NOTE: this is deprecated in favor of drm_err() or dev_err(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_ERROR(dev, fmt, ...) \ drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__) /** * DRM_DEV_ERROR_RATELIMITED() - Rate limited error output. * * NOTE: this is deprecated in favor of drm_err_ratelimited() or * dev_err_ratelimited(). * * @dev: device pointer * @fmt: printf() like format string. * * Like DRM_ERROR() but won't flood the log. */ #define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ \ if (__ratelimit(&_rs)) \ DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ }) /* NOTE: this is deprecated in favor of drm_info() or dev_info(). */ #define DRM_DEV_INFO(dev, fmt, ...) \ drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */ #define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ if (!__print_once) { \ __print_once = true; \ DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ } \ }) #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) #define drm_dev_dbg(dev, cat, fmt, ...) \ __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__) #else #define drm_dev_dbg(dev, cat, fmt, ...) \ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \ dev, cat, fmt, ##__VA_ARGS__) #endif /** * DRM_DEV_DEBUG() - Debug output for generic drm code * * NOTE: this is deprecated in favor of drm_dbg_core(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_DEBUG(dev, fmt, ...) \ drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) /** * DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver * * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) 
\ drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) /** * DRM_DEV_DEBUG_KMS() - Debug output for modesetting code * * NOTE: this is deprecated in favor of drm_dbg_kms(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) /* * struct drm_device based logging * * Prefer drm_device based logging over device or prink based logging. */ /* Helper for struct drm_device based logging. */ #define __drm_printk(drm, level, type, fmt, ...) \ dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__) #define drm_info(drm, fmt, ...) \ __drm_printk((drm), info,, fmt, ##__VA_ARGS__) #define drm_notice(drm, fmt, ...) \ __drm_printk((drm), notice,, fmt, ##__VA_ARGS__) #define drm_warn(drm, fmt, ...) \ __drm_printk((drm), warn,, fmt, ##__VA_ARGS__) #define drm_err(drm, fmt, ...) \ __drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__) #define drm_info_once(drm, fmt, ...) \ __drm_printk((drm), info, _once, fmt, ##__VA_ARGS__) #define drm_notice_once(drm, fmt, ...) \ __drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__) #define drm_warn_once(drm, fmt, ...) \ __drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__) #define drm_err_once(drm, fmt, ...) \ __drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__) #define drm_err_ratelimited(drm, fmt, ...) \ __drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__) #define drm_dbg_core(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) #define drm_dbg_driver(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define drm_dbg_kms(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) #define drm_dbg_prime(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) #define drm_dbg_atomic(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) #define drm_dbg_vbl(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) #define drm_dbg_state(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) #define drm_dbg_lease(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) #define drm_dbg_dp(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) #define drm_dbg_drmres(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) #define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) /* * printk based logging * * Prefer drm_device based logging over device or prink based logging. */ __printf(1, 2) void __drm_err(const char *format, ...); #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) #define __drm_dbg(cat, fmt, ...) __drm_dev_dbg(NULL, NULL, cat, fmt, ##__VA_ARGS__) #else #define __drm_dbg(cat, fmt, ...) \ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \ NULL, cat, fmt, ##__VA_ARGS__) #endif /* Macros to make printk easier */ #define _DRM_PRINTK(once, level, fmt, ...) \ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_info(). */ #define DRM_INFO(fmt, ...) \ _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_notice(). */ #define DRM_NOTE(fmt, ...) \ _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_warn(). */ #define DRM_WARN(fmt, ...) 
\ _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_info_once(). */ #define DRM_INFO_ONCE(fmt, ...) \ _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_notice_once(). */ #define DRM_NOTE_ONCE(fmt, ...) \ _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_warn_once(). */ #define DRM_WARN_ONCE(fmt, ...) \ _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_err(). */ #define DRM_ERROR(fmt, ...) \ __drm_err(fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_err_ratelimited(). */ #define DRM_ERROR_RATELIMITED(fmt, ...) \ DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */ #define DRM_DEBUG(fmt, ...) \ __drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */ #define DRM_DEBUG_DRIVER(fmt, ...) \ __drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */ #define DRM_DEBUG_KMS(fmt, ...) \ __drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */ #define DRM_DEBUG_PRIME(fmt, ...) \ __drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */ #define DRM_DEBUG_ATOMIC(fmt, ...) \ __drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */ #define DRM_DEBUG_VBL(fmt, ...) \ __drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */ #define DRM_DEBUG_LEASE(fmt, ...) \ __drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */ #define DRM_DEBUG_DP(fmt, ...) \ __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) #define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ const struct drm_device *drm_ = (drm); \ \ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ }) #define drm_dbg_ratelimited(drm, fmt, ...) \ __DRM_DEFINE_DBG_RATELIMITED(DRIVER, drm, fmt, ## __VA_ARGS__) #define drm_dbg_kms_ratelimited(drm, fmt, ...) \ __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__) /* * struct drm_device based WARNs * * drm_WARN*() acts like WARN*(), but with the key difference of * using device specific information so that we know from which device * warning is originating from. * * Prefer drm_device based drm_WARN* over regular WARN* */ /* Helper for struct drm_device based WARNs */ #define drm_WARN(drm, condition, format, arg...) \ WARN(condition, "%s %s: [drm] " format, \ dev_driver_string((drm)->dev), \ dev_name((drm)->dev), ## arg) #define drm_WARN_ONCE(drm, condition, format, arg...) \ WARN_ONCE(condition, "%s %s: [drm] " format, \ dev_driver_string((drm)->dev), \ dev_name((drm)->dev), ## arg) #define drm_WARN_ON(drm, x) \ drm_WARN((drm), (x), "%s", \ "drm_WARN_ON(" __stringify(x) ")") #define drm_WARN_ON_ONCE(drm, x) \ drm_WARN_ONCE((drm), (x), "%s", \ "drm_WARN_ON_ONCE(" __stringify(x) ")") #endif /* DRM_PRINT_H_ */
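Usage note (not part of the header above): the point of the drm_printer abstraction is that one dump helper can be retargeted at dmesg or at a debugfs seq_file just by swapping the constructor. A hedged sketch, with illustrative function names that are not taken from the header:

/* Hypothetical sketch: one dump routine, two output targets. */
#include <drm/drm_print.h>
#include <drm/drm_device.h>
#include <linux/seq_file.h>

static void example_dump_state(struct drm_printer *p, int frames)
{
	drm_printf(p, "frames submitted: %d\n", frames);
	drm_printf_indent(p, 1, "details go here\n");
}

/* Route the dump to dev_printk() on the DRM device ... */
static void example_dump_to_dmesg(struct drm_device *drm, int frames)
{
	struct drm_printer p = drm_info_printer(drm->dev);

	example_dump_state(&p, frames);
}

/* ... or to a debugfs/seq_file consumer, with no change to the helper. */
static int example_debugfs_show(struct seq_file *m, void *unused)
{
	struct drm_printer p = drm_seq_file_printer(m);

	example_dump_state(&p, 0);
	return 0;
}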
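/*
 * Usage sketch (not part of drm_print.h): the rate-limited variants are
 * meant for paths that can fire every frame or interrupt. The handler name
 * below is invented; only drm_dbg_kms_ratelimited() comes from this header.
 */
#include <drm/drm_device.h>
#include <drm/drm_print.h>

static void example_report_flip_timeout(struct drm_device *drm, unsigned int pipe)
{
	/*
	 * Checks that the DRM_UT_KMS category is enabled, then applies the
	 * default printk ratelimit so a wedged display cannot flood the log.
	 */
	drm_dbg_kms_ratelimited(drm, "flip completion timed out on pipe %u\n",
				pipe);
}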
// SPDX-License-Identifier: GPL-2.0-or-later /* * uvc_driver.c -- USB Video Class driver * * Copyright (C) 2005-2010 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #include <linux/atomic.h> #include <linux/bits.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/quirks.h> #include <linux/usb/uvc.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/unaligned.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "uvcvideo.h" #define DRIVER_AUTHOR "Laurent Pinchart " \ "<laurent.pinchart@ideasonboard.com>" #define DRIVER_DESC "USB Video Class driver" unsigned int uvc_clock_param = CLOCK_MONOTONIC; unsigned int uvc_hw_timestamps_param; unsigned int uvc_no_drop_param = 1; static unsigned int uvc_quirks_param = -1; unsigned int uvc_dbg_param; unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT; /* ------------------------------------------------------------------------ * Utility functions */ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts, u8 epaddr) { struct usb_host_endpoint *ep; unsigned int i; for (i = 0; i < alts->desc.bNumEndpoints; ++i) { ep = &alts->endpoint[i]; if (ep->desc.bEndpointAddress == epaddr) return ep; } return NULL; } static enum v4l2_colorspace uvc_colorspace(const u8 primaries) { static const enum v4l2_colorspace colorprimaries[] = { V4L2_COLORSPACE_SRGB, /* Unspecified */ V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_470_SYSTEM_M, V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_SMPTE240M, }; if (primaries < ARRAY_SIZE(colorprimaries)) return colorprimaries[primaries]; return V4L2_COLORSPACE_SRGB; /* Reserved */ } static enum v4l2_xfer_func uvc_xfer_func(const u8 transfer_characteristics) { /* * V4L2 does not currently have definitions for all possible values of * UVC transfer characteristics. If v4l2_xfer_func is extended with new * values, the mapping below should be updated.
* * Substitutions are taken from the mapping given for * V4L2_XFER_FUNC_DEFAULT documented in videodev2.h. */ static const enum v4l2_xfer_func xfer_funcs[] = { V4L2_XFER_FUNC_DEFAULT, /* Unspecified */ V4L2_XFER_FUNC_709, V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 M */ V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 B, G */ V4L2_XFER_FUNC_709, /* Substitution for SMPTE 170M */ V4L2_XFER_FUNC_SMPTE240M, V4L2_XFER_FUNC_NONE, V4L2_XFER_FUNC_SRGB, }; if (transfer_characteristics < ARRAY_SIZE(xfer_funcs)) return xfer_funcs[transfer_characteristics]; return V4L2_XFER_FUNC_DEFAULT; /* Reserved */ } static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients) { /* * V4L2 does not currently have definitions for all possible values of * UVC matrix coefficients. If v4l2_ycbcr_encoding is extended with new * values, the mapping below should be updated. * * Substitutions are taken from the mapping given for * V4L2_YCBCR_ENC_DEFAULT documented in videodev2.h. * * FCC is assumed to be close enough to 601. */ static const enum v4l2_ycbcr_encoding ycbcr_encs[] = { V4L2_YCBCR_ENC_DEFAULT, /* Unspecified */ V4L2_YCBCR_ENC_709, V4L2_YCBCR_ENC_601, /* Substitution for FCC */ V4L2_YCBCR_ENC_601, /* Substitution for BT.470-2 B, G */ V4L2_YCBCR_ENC_601, V4L2_YCBCR_ENC_SMPTE240M, }; if (matrix_coefficients < ARRAY_SIZE(ycbcr_encs)) return ycbcr_encs[matrix_coefficients]; return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */ } /* ------------------------------------------------------------------------ * Terminal and unit management */ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) { struct uvc_entity *entity; list_for_each_entry(entity, &dev->entities, list) { if (entity->id == id) return entity; } return NULL; } static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev, int id, struct uvc_entity *entity) { unsigned int i; if (entity == NULL) entity = list_entry(&dev->entities, struct uvc_entity, list); list_for_each_entry_continue(entity, &dev->entities, list) { for (i = 0; i < entity->bNrInPins; ++i) if (entity->baSourceID[i] == id) return entity; } return NULL; } static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) { struct uvc_streaming *stream; list_for_each_entry(stream, &dev->streams, list) { if (stream->header.bTerminalLink == id) return stream; } return NULL; } /* ------------------------------------------------------------------------ * Streaming Object Management */ static void uvc_stream_delete(struct uvc_streaming *stream) { if (stream->async_wq) destroy_workqueue(stream->async_wq); mutex_destroy(&stream->mutex); usb_put_intf(stream->intf); kfree(stream->formats); kfree(stream->header.bmaControls); kfree(stream); } static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev, struct usb_interface *intf) { struct uvc_streaming *stream; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (stream == NULL) return NULL; mutex_init(&stream->mutex); stream->dev = dev; stream->intf = usb_get_intf(intf); stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; /* Allocate a stream specific work queue for asynchronous tasks. 
*/ stream->async_wq = alloc_workqueue("uvcvideo", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!stream->async_wq) { uvc_stream_delete(stream); return NULL; } return stream; } /* ------------------------------------------------------------------------ * Descriptors parsing */ static int uvc_parse_frame(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, struct uvc_frame *frame, u32 **intervals, u8 ftype, int width_multiplier, const unsigned char *buffer, int buflen) { struct usb_host_interface *alts = streaming->intf->cur_altsetting; unsigned int maxIntervalIndex; unsigned int interval; unsigned int i, n; if (ftype != UVC_VS_FRAME_FRAME_BASED) n = buflen > 25 ? buffer[25] : 0; else n = buflen > 21 ? buffer[21] : 0; n = n ? n : 3; if (buflen < 26 + 4 * n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FRAME error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } frame->bFrameIndex = buffer[3]; frame->bmCapabilities = buffer[4]; frame->wWidth = get_unaligned_le16(&buffer[5]) * width_multiplier; frame->wHeight = get_unaligned_le16(&buffer[7]); frame->dwMinBitRate = get_unaligned_le32(&buffer[9]); frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]); if (ftype != UVC_VS_FRAME_FRAME_BASED) { frame->dwMaxVideoFrameBufferSize = get_unaligned_le32(&buffer[17]); frame->dwDefaultFrameInterval = get_unaligned_le32(&buffer[21]); frame->bFrameIntervalType = buffer[25]; } else { frame->dwMaxVideoFrameBufferSize = 0; frame->dwDefaultFrameInterval = get_unaligned_le32(&buffer[17]); frame->bFrameIntervalType = buffer[21]; } /* * Copy the frame intervals. * * Some bogus devices report dwMinFrameInterval equal to * dwMaxFrameInterval and have dwFrameIntervalStep set to zero. Setting * all null intervals to 1 fixes the problem and some other divisions * by zero that could happen. */ frame->dwFrameInterval = *intervals; for (i = 0; i < n; ++i) { interval = get_unaligned_le32(&buffer[26 + 4 * i]); (*intervals)[i] = interval ? interval : 1; } /* * Apply more fixes, quirks and workarounds to handle incorrect or * broken descriptors. */ /* * Several UVC chipsets screw up dwMaxVideoFrameBufferSize completely. * Observed behaviours range from setting the value to 1.1x the actual * frame size to hardwiring the 16 low bits to 0. This results in a * higher than necessary memory usage as well as a wrong image size * information. For uncompressed formats this can be fixed by computing * the value from the frame size. */ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED)) frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth * frame->wHeight / 8; /* * Clamp the default frame interval to the boundaries. A zero * bFrameIntervalType value indicates a continuous frame interval * range, with dwFrameInterval[0] storing the minimum value and * dwFrameInterval[1] storing the maximum value. */ maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1; frame->dwDefaultFrameInterval = clamp(frame->dwDefaultFrameInterval, frame->dwFrameInterval[0], frame->dwFrameInterval[maxIntervalIndex]); /* * Some devices report frame intervals that are not functional. If the * corresponding quirk is set, restrict operation to the first interval * only. 
*/ if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) { frame->bFrameIntervalType = 1; (*intervals)[0] = frame->dwDefaultFrameInterval; } uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000 / frame->dwDefaultFrameInterval, (100000000 / frame->dwDefaultFrameInterval) % 10); *intervals += n; return buffer[0]; } static int uvc_parse_format(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, struct uvc_frame *frames, u32 **intervals, const unsigned char *buffer, int buflen) { struct usb_host_interface *alts = streaming->intf->cur_altsetting; const struct uvc_format_desc *fmtdesc; struct uvc_frame *frame; const unsigned char *start = buffer; unsigned int width_multiplier = 1; unsigned int i, n; u8 ftype; int ret; format->type = buffer[2]; format->index = buffer[3]; format->frames = frames; switch (buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_FRAME_BASED: n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28; if (buflen < n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); if (!fmtdesc) { /* * Unknown video formats are not fatal errors, the * caller will skip this descriptor. */ dev_info(&streaming->intf->dev, "Unknown video format %pUl\n", &buffer[5]); return 0; } format->fcc = fmtdesc->fcc; format->bpp = buffer[21]; /* * Some devices report a format that doesn't match what they * really send. */ if (dev->quirks & UVC_QUIRK_FORCE_Y8) { if (format->fcc == V4L2_PIX_FMT_YUYV) { format->fcc = V4L2_PIX_FMT_GREY; format->bpp = 8; width_multiplier = 2; } } /* Some devices report bpp that doesn't match the format. */ if (dev->quirks & UVC_QUIRK_FORCE_BPP) { const struct v4l2_format_info *info = v4l2_format_info(format->fcc); if (info) { unsigned int div = info->hdiv * info->vdiv; n = info->bpp[0] * div; for (i = 1; i < info->comp_planes; i++) n += info->bpp[i]; format->bpp = DIV_ROUND_UP(8 * n, div); } } if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) { ftype = UVC_VS_FRAME_UNCOMPRESSED; } else { ftype = UVC_VS_FRAME_FRAME_BASED; if (buffer[27]) format->flags = UVC_FMT_FLAG_COMPRESSED; } break; case UVC_VS_FORMAT_MJPEG: if (buflen < 11) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; ftype = UVC_VS_FRAME_MJPEG; break; case UVC_VS_FORMAT_DV: if (buflen < 9) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } if ((buffer[8] & 0x7f) > 2) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d: unknown DV format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[8]); return -EINVAL; } format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; ftype = 0; /* Create a dummy frame descriptor. */ frame = &frames[0]; memset(frame, 0, sizeof(*frame)); frame->bFrameIntervalType = 1; frame->dwDefaultFrameInterval = 1; frame->dwFrameInterval = *intervals; *(*intervals)++ = 1; format->nframes = 1; break; case UVC_VS_FORMAT_MPEG2TS: case UVC_VS_FORMAT_STREAM_BASED: /* Not supported yet. 
*/ default: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d unsupported format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[2]); return -EINVAL; } uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc); buflen -= buffer[0]; buffer += buffer[0]; /* * Parse the frame descriptors. Only uncompressed, MJPEG and frame * based formats have frame descriptors. */ if (ftype) { while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == ftype) { frame = &frames[format->nframes]; ret = uvc_parse_frame(dev, streaming, format, frame, intervals, ftype, width_multiplier, buffer, buflen); if (ret < 0) return ret; format->nframes++; buflen -= ret; buffer += ret; } } if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == UVC_VS_STILL_IMAGE_FRAME) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == UVC_VS_COLORFORMAT) { if (buflen < 6) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d COLORFORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->colorspace = uvc_colorspace(buffer[3]); format->xfer_func = uvc_xfer_func(buffer[4]); format->ycbcr_enc = uvc_ycbcr_enc(buffer[5]); buflen -= buffer[0]; buffer += buffer[0]; } else { format->colorspace = V4L2_COLORSPACE_SRGB; } return buffer - start; } static int uvc_parse_streaming(struct uvc_device *dev, struct usb_interface *intf) { struct uvc_streaming *streaming = NULL; struct uvc_format *format; struct uvc_frame *frame; struct usb_host_interface *alts = &intf->altsetting[0]; const unsigned char *_buffer, *buffer = alts->extra; int _buflen, buflen = alts->extralen; unsigned int nformats = 0, nframes = 0, nintervals = 0; unsigned int size, i, n, p; u32 *interval; u16 psize; int ret = -EINVAL; if (intf->cur_altsetting->desc.bInterfaceSubClass != UVC_SC_VIDEOSTREAMING) { uvc_dbg(dev, DESCR, "device %d interface %d isn't a video streaming interface\n", dev->udev->devnum, intf->altsetting[0].desc.bInterfaceNumber); return -EINVAL; } if (usb_driver_claim_interface(&uvc_driver.driver, intf, dev)) { uvc_dbg(dev, DESCR, "device %d interface %d is already claimed\n", dev->udev->devnum, intf->altsetting[0].desc.bInterfaceNumber); return -EINVAL; } streaming = uvc_stream_new(dev, intf); if (streaming == NULL) { usb_driver_release_interface(&uvc_driver.driver, intf); return -ENOMEM; } /* * The Pico iMage webcam has its class-specific interface descriptors * after the endpoint descriptors. */ if (buflen == 0) { for (i = 0; i < alts->desc.bNumEndpoints; ++i) { struct usb_host_endpoint *ep = &alts->endpoint[i]; if (ep->extralen == 0) continue; if (ep->extralen > 2 && ep->extra[1] == USB_DT_CS_INTERFACE) { uvc_dbg(dev, DESCR, "trying extra data from endpoint %u\n", i); buffer = alts->endpoint[i].extra; buflen = alts->endpoint[i].extralen; break; } } } /* Skip the standard interface descriptors. */ while (buflen > 2 && buffer[1] != USB_DT_CS_INTERFACE) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen <= 2) { uvc_dbg(dev, DESCR, "no class-specific streaming interface descriptors found\n"); goto error; } /* Parse the header descriptor. */ switch (buffer[2]) { case UVC_VS_OUTPUT_HEADER: streaming->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; size = 9; break; case UVC_VS_INPUT_HEADER: streaming->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; size = 13; break; default: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d HEADER descriptor not found\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } p = buflen >= 4 ? 
buffer[3] : 0; n = buflen >= size ? buffer[size-1] : 0; if (buflen < size + p*n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d HEADER descriptor is invalid\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } streaming->header.bNumFormats = p; streaming->header.bEndpointAddress = buffer[6]; if (buffer[2] == UVC_VS_INPUT_HEADER) { streaming->header.bmInfo = buffer[7]; streaming->header.bTerminalLink = buffer[8]; streaming->header.bStillCaptureMethod = buffer[9]; streaming->header.bTriggerSupport = buffer[10]; streaming->header.bTriggerUsage = buffer[11]; } else { streaming->header.bTerminalLink = buffer[7]; } streaming->header.bControlSize = n; streaming->header.bmaControls = kmemdup(&buffer[size], p * n, GFP_KERNEL); if (streaming->header.bmaControls == NULL) { ret = -ENOMEM; goto error; } buflen -= buffer[0]; buffer += buffer[0]; _buffer = buffer; _buflen = buflen; /* Count the format and frame descriptors. */ while (_buflen > 2 && _buffer[1] == USB_DT_CS_INTERFACE) { switch (_buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_MJPEG: case UVC_VS_FORMAT_FRAME_BASED: nformats++; break; case UVC_VS_FORMAT_DV: /* * DV format has no frame descriptor. We will create a * dummy frame descriptor with a dummy frame interval. */ nformats++; nframes++; nintervals++; break; case UVC_VS_FORMAT_MPEG2TS: case UVC_VS_FORMAT_STREAM_BASED: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT %u is not supported\n", dev->udev->devnum, alts->desc.bInterfaceNumber, _buffer[2]); break; case UVC_VS_FRAME_UNCOMPRESSED: case UVC_VS_FRAME_MJPEG: nframes++; if (_buflen > 25) nintervals += _buffer[25] ? _buffer[25] : 3; break; case UVC_VS_FRAME_FRAME_BASED: nframes++; if (_buflen > 21) nintervals += _buffer[21] ? _buffer[21] : 3; break; } _buflen -= _buffer[0]; _buffer += _buffer[0]; } if (nformats == 0) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d has no supported formats defined\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } /* * Allocate memory for the formats, the frames and the intervals, * plus any required padding to guarantee that everything has the * correct alignment. */ size = nformats * sizeof(*format); size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame); size = ALIGN(size, __alignof__(*interval)) + nintervals * sizeof(*interval); format = kzalloc(size, GFP_KERNEL); if (!format) { ret = -ENOMEM; goto error; } frame = (void *)format + nformats * sizeof(*format); frame = PTR_ALIGN(frame, __alignof__(*frame)); interval = (void *)frame + nframes * sizeof(*frame); interval = PTR_ALIGN(interval, __alignof__(*interval)); streaming->formats = format; streaming->nformats = 0; /* Parse the format descriptors. */ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) { switch (buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_MJPEG: case UVC_VS_FORMAT_DV: case UVC_VS_FORMAT_FRAME_BASED: ret = uvc_parse_format(dev, streaming, format, frame, &interval, buffer, buflen); if (ret < 0) goto error; if (!ret) break; streaming->nformats++; frame += format->nframes; format++; buflen -= ret; buffer += ret; continue; default: break; } buflen -= buffer[0]; buffer += buffer[0]; } if (buflen) uvc_dbg(dev, DESCR, "device %d videostreaming interface %d has %u bytes of trailing descriptor garbage\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buflen); /* Parse the alternate settings to find the maximum bandwidth. 
*/ for (i = 0; i < intf->num_altsetting; ++i) { struct usb_host_endpoint *ep; alts = &intf->altsetting[i]; ep = uvc_find_endpoint(alts, streaming->header.bEndpointAddress); if (ep == NULL) continue; psize = uvc_endpoint_max_bpi(dev->udev, ep); if (psize > streaming->maxpsize) streaming->maxpsize = psize; } list_add_tail(&streaming->list, &dev->streams); return 0; error: usb_driver_release_interface(&uvc_driver.driver, intf); uvc_stream_delete(streaming); return ret; } static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA; static const u8 uvc_gpio_guid[16] = UVC_GUID_EXT_GPIO_CONTROLLER; static const u8 uvc_media_transport_input_guid[16] = UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT; static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING; static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id, unsigned int num_pads, unsigned int extra_size) { struct uvc_entity *entity; unsigned int num_inputs; unsigned int size; unsigned int i; extra_size = roundup(extra_size, sizeof(*entity->pads)); if (num_pads) num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1; else num_inputs = 0; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; entity = kzalloc(size, GFP_KERNEL); if (entity == NULL) return NULL; entity->id = id; entity->type = type; /* * Set the GUID for standard entity types. For extension units, the GUID * is initialized by the caller. */ switch (type) { case UVC_EXT_GPIO_UNIT: memcpy(entity->guid, uvc_gpio_guid, 16); break; case UVC_ITT_CAMERA: memcpy(entity->guid, uvc_camera_guid, 16); break; case UVC_ITT_MEDIA_TRANSPORT_INPUT: memcpy(entity->guid, uvc_media_transport_input_guid, 16); break; case UVC_VC_PROCESSING_UNIT: memcpy(entity->guid, uvc_processing_guid, 16); break; } entity->num_links = 0; entity->num_pads = num_pads; entity->pads = ((void *)(entity + 1)) + extra_size; for (i = 0; i < num_inputs; ++i) entity->pads[i].flags = MEDIA_PAD_FL_SINK; if (!UVC_ENTITY_IS_OTERM(entity) && num_pads) entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; entity->bNrInPins = num_inputs; entity->baSourceID = (u8 *)(&entity->pads[num_pads]); return entity; } static void uvc_entity_set_name(struct uvc_device *dev, struct uvc_entity *entity, const char *type_name, u8 string_id) { int ret; /* * First attempt to read the entity name from the device. If the entity * has no associated string, or if reading the string fails (most * likely due to a buggy firmware), fall back to default names based on * the entity type. */ if (string_id) { ret = usb_string(dev->udev, string_id, entity->name, sizeof(entity->name)); if (!ret) return; } sprintf(entity->name, "%s %u", type_name, entity->id); } /* Parse vendor-specific extensions. */ static int uvc_parse_vendor_control(struct uvc_device *dev, const unsigned char *buffer, int buflen) { struct usb_device *udev = dev->udev; struct usb_host_interface *alts = dev->intf->cur_altsetting; struct uvc_entity *unit; unsigned int n, p; int handled = 0; switch (le16_to_cpu(dev->udev->descriptor.idVendor)) { case 0x046d: /* Logitech */ if (buffer[1] != 0x41 || buffer[2] != 0x01) break; /* * Logitech implements several vendor specific functions * through vendor specific extension units (LXU). 
* * The LXU descriptors are similar to XU descriptors * (see "USB Device Video Class for Video Devices", section * 3.7.2.6 "Extension Unit Descriptor") with the following * differences: * * ---------------------------------------------------------- * 0 bLength 1 Number * Size of this descriptor, in bytes: 24+p+n*2 * ---------------------------------------------------------- * 23+p+n bmControlsType N Bitmap * Individual bits in the set are defined: * 0: Absolute * 1: Relative * * This bitset is mapped exactly the same as bmControls. * ---------------------------------------------------------- * 23+p+n*2 bReserved 1 Boolean * ---------------------------------------------------------- * 24+p+n*2 iExtension 1 Index * Index of a string descriptor that describes this * extension unit. * ---------------------------------------------------------- */ p = buflen >= 22 ? buffer[21] : 0; n = buflen >= 25 + p ? buffer[22+p] : 0; if (buflen < 25 + p + 2*n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d EXTENSION_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); break; } unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3], p + 1, 2*n); if (unit == NULL) return -ENOMEM; memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; memcpy(unit->baSourceID, &buffer[22], p); unit->extension.bControlSize = buffer[22+p]; unit->extension.bmControls = (u8 *)unit + sizeof(*unit); unit->extension.bmControlsType = (u8 *)unit + sizeof(*unit) + n; memcpy(unit->extension.bmControls, &buffer[23+p], 2*n); uvc_entity_set_name(dev, unit, "Extension", buffer[24+p+2*n]); list_add_tail(&unit->list, &dev->entities); handled = 1; break; } return handled; } static int uvc_parse_standard_control(struct uvc_device *dev, const unsigned char *buffer, int buflen) { struct usb_device *udev = dev->udev; struct uvc_entity *unit, *term; struct usb_interface *intf; struct usb_host_interface *alts = dev->intf->cur_altsetting; unsigned int i, n, p, len; const char *type_name; u16 type; switch (buffer[2]) { case UVC_VC_HEADER: n = buflen >= 12 ? buffer[11] : 0; if (buflen < 12 + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d HEADER error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } dev->uvc_version = get_unaligned_le16(&buffer[3]); dev->clock_frequency = get_unaligned_le32(&buffer[7]); /* Parse all USB Video Streaming interfaces. */ for (i = 0; i < n; ++i) { intf = usb_ifnum_to_if(udev, buffer[12+i]); if (intf == NULL) { uvc_dbg(dev, DESCR, "device %d interface %d doesn't exists\n", udev->devnum, i); continue; } uvc_parse_streaming(dev, intf); } break; case UVC_VC_INPUT_TERMINAL: if (buflen < 8) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* * Reject invalid terminal types that would cause issues: * * - The high byte must be non-zero, otherwise it would be * confused with a unit. * * - Bit 15 must be 0, as we use it internally as a terminal * direction flag. * * Other unknown types are accepted. */ type = get_unaligned_le16(&buffer[4]); if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL %d has invalid type 0x%04x, skipping\n", udev->devnum, alts->desc.bInterfaceNumber, buffer[3], type); return 0; } n = 0; p = 0; len = 8; if (type == UVC_ITT_CAMERA) { n = buflen >= 15 ? buffer[14] : 0; len = 15; } else if (type == UVC_ITT_MEDIA_TRANSPORT_INPUT) { n = buflen >= 9 ? 
buffer[8] : 0; p = buflen >= 10 + n ? buffer[9+n] : 0; len = 10; } if (buflen < len + n + p) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3], 1, n + p); if (term == NULL) return -ENOMEM; if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) { term->camera.bControlSize = n; term->camera.bmControls = (u8 *)term + sizeof(*term); term->camera.wObjectiveFocalLengthMin = get_unaligned_le16(&buffer[8]); term->camera.wObjectiveFocalLengthMax = get_unaligned_le16(&buffer[10]); term->camera.wOcularFocalLength = get_unaligned_le16(&buffer[12]); memcpy(term->camera.bmControls, &buffer[15], n); } else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) { term->media.bControlSize = n; term->media.bmControls = (u8 *)term + sizeof(*term); term->media.bTransportModeSize = p; term->media.bmTransportModes = (u8 *)term + sizeof(*term) + n; memcpy(term->media.bmControls, &buffer[9], n); memcpy(term->media.bmTransportModes, &buffer[10+n], p); } if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) type_name = "Camera"; else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) type_name = "Media"; else type_name = "Input"; uvc_entity_set_name(dev, term, type_name, buffer[7]); list_add_tail(&term->list, &dev->entities); break; case UVC_VC_OUTPUT_TERMINAL: if (buflen < 9) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d OUTPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* * Make sure the terminal type MSB is not null, otherwise it * could be confused with a unit. */ type = get_unaligned_le16(&buffer[4]); if ((type & 0xff00) == 0) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d OUTPUT_TERMINAL %d has invalid type 0x%04x, skipping\n", udev->devnum, alts->desc.bInterfaceNumber, buffer[3], type); return 0; } term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3], 1, 0); if (term == NULL) return -ENOMEM; memcpy(term->baSourceID, &buffer[7], 1); uvc_entity_set_name(dev, term, "Output", buffer[8]); list_add_tail(&term->list, &dev->entities); break; case UVC_VC_SELECTOR_UNIT: p = buflen >= 5 ? buffer[4] : 0; if (buflen < 5 || buflen < 6 + p) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d SELECTOR_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0); if (unit == NULL) return -ENOMEM; memcpy(unit->baSourceID, &buffer[5], p); uvc_entity_set_name(dev, unit, "Selector", buffer[5+p]); list_add_tail(&unit->list, &dev->entities); break; case UVC_VC_PROCESSING_UNIT: n = buflen >= 8 ? buffer[7] : 0; p = dev->uvc_version >= 0x0110 ? 10 : 9; if (buflen < p + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d PROCESSING_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n); if (unit == NULL) return -ENOMEM; memcpy(unit->baSourceID, &buffer[4], 1); unit->processing.wMaxMultiplier = get_unaligned_le16(&buffer[5]); unit->processing.bControlSize = buffer[7]; unit->processing.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->processing.bmControls, &buffer[8], n); if (dev->uvc_version >= 0x0110) unit->processing.bmVideoStandards = buffer[9+n]; uvc_entity_set_name(dev, unit, "Processing", buffer[8+n]); list_add_tail(&unit->list, &dev->entities); break; case UVC_VC_EXTENSION_UNIT: p = buflen >= 22 ? buffer[21] : 0; n = buflen >= 24 + p ? 
buffer[22+p] : 0; if (buflen < 24 + p + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d EXTENSION_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n); if (unit == NULL) return -ENOMEM; memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; memcpy(unit->baSourceID, &buffer[22], p); unit->extension.bControlSize = buffer[22+p]; unit->extension.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->extension.bmControls, &buffer[23+p], n); uvc_entity_set_name(dev, unit, "Extension", buffer[23+p+n]); list_add_tail(&unit->list, &dev->entities); break; default: uvc_dbg(dev, DESCR, "Found an unknown CS_INTERFACE descriptor (%u)\n", buffer[2]); break; } return 0; } static int uvc_parse_control(struct uvc_device *dev) { struct usb_host_interface *alts = dev->intf->cur_altsetting; const unsigned char *buffer = alts->extra; int buflen = alts->extralen; int ret; /* * Parse the default alternate setting only, as the UVC specification * defines a single alternate setting, the default alternate setting * zero. */ while (buflen > 2) { if (uvc_parse_vendor_control(dev, buffer, buflen) || buffer[1] != USB_DT_CS_INTERFACE) goto next_descriptor; ret = uvc_parse_standard_control(dev, buffer, buflen); if (ret < 0) return ret; next_descriptor: buflen -= buffer[0]; buffer += buffer[0]; } /*